Merge pull request #1 from apache/master

Merge from origin branch
diff --git a/KEYS b/KEYS
index 355fecb..3cc4efc 100644
--- a/KEYS
+++ b/KEYS
@@ -859,3 +859,62 @@
 kiu+N7o3N68zHTH+fh4=
 =EHog
 -----END PGP PUBLIC KEY BLOCK-----
+pub   rsa4096 2020-06-02 [SC] [expires: 2022-06-02]
+      603B397B5E20757D18BE6C91F3B965A6B192DAB7
+uid           [ultimate] Bharath Kumarasubramanian <bharathkk@apache.org>
+sig 3        F3B965A6B192DAB7 2020-06-02  Bharath Kumarasubramanian <bharathkk@apache.org>
+sub   rsa4096 2020-06-02 [E] [expires: 2022-06-02]
+sig          F3B965A6B192DAB7 2020-06-02  Bharath Kumarasubramanian <bharathkk@apache.org>
+
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQINBF7VqQMBEADBHEVwT6sRsE600Xai+vWvfHntpBp40v1VCCvxUHDGfYRHnIju
+kGDID25SaTcHiQx+imvWA53k6alu9Ues5xzEJVEUiY+l/Yfu9aLfj9ibZCVWc23g
+F0iypHaa5pP/G9BN38gnWiyndI0+Fdy9cmeJUoQ13xhLxDZx89b1BffAQ02We3Fg
+v24xsBC5zdtgLDmXjmX5oSLCFJu6fJSgbqiL04YczzvdSgEtxo8lKubp1GzWBzKj
+X6eKJ6+tLQ3fNu+qbSmGMABgrIHjEV7mKUUy9fAk3noTO5Mnowx+fI/owvRbREzj
+rBXowDKkuJ5UHEn69gtuHbmeLhH8NcIFTPNVW6QtDPiJlH8fUxlGihodBjwAz1ug
+WCalDxRrVGu1hCSus6QmVUrerlzMhVfhlKolZAFEpXD1wM7iEgsxTl+8tA9Eisni
+ImOBxMaQ1LJxfE5QYSYHX0/wQUy8ejdWQXiqjp59/FcwAJLaHRsJWr8C44yl6r6/
+cfGEoeuLh3VFX9E5HbOeygQr72fNwVYbNS82QJFWuf4TbESkqo7hXm7vg8N0nWiQ
+W5Unyi2SkejhNJIv4vQLtK3n1snv4Q3OxBhIgy9r1Yxh0vhO8knshLpxt4O2siUN
+Yj/cycqqFKo6NJkC2tumY7nCN3EUEc/7UOPd72flgEJnKmWzhEIrS9D03wARAQAB
+tDBCaGFyYXRoIEt1bWFyYXN1YnJhbWFuaWFuIDxiaGFyYXRoa2tAYXBhY2hlLm9y
+Zz6JAlQEEwEIAD4WIQRgOzl7XiB1fRi+bJHzuWWmsZLatwUCXtWpAwIbAwUJA8Jn
+AAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRDzuWWmsZLat2JcD/92CPVPaXTQ
+iUlHDjwEYDhVpZkWYCBT3rr0z21Sp/ZlPkKEi0Oy8VBVwj32v83bOTMXYlFe8PgR
+Ng2UwM9zoaMQ7ZhAoD9NU+dRo+gdUudJhXAuiR8QpYsnJxl4OmoDzfjyVOTKx1lb
+hdjYYx4rNzOPw6UWxadeMnYlMAsOOa6kAoRRb4hJCtM9d7hTGQhBXQhKgWeAED1+
+tvygucO0/iLCRDRk5Oaawn7o6WXjcsZGlzKpZWYwzHVfnq8hr7Ba4GkCCI4kg4Il
+kaphsA1K1oTyU+e73smmLKT8O9dU+6ZHO2gfY+5MVxFvh14dpy88bwaSMDYi0BRo
+U5vy493d4MsE1Aqm5bbVplzcVblF04B/hBn4WdxjroCl/ocNea/JWQdJRAXb4p6V
+qzZqGAi3vKgBdcPLg74EExRuATtF0P41VxOh7/QmdfFLjATrj27evSkvYd8L8EGN
+XzSeA/XZJRN9jlcLQ+j4kqP2o2v10soxS2HguHZsmY2ix10ADCE+UsLTFgYN4M3Q
+xHMvJAHDWhN+I1bNmGAKNMbS4cADVmHf3RHUYl2AzlU8Gl1qI/Aw5KOMi7jB4RFY
+boxFF39TCB4S7GkpKAWA6N9Wh2uZv2jbtjCJSoCYkqlF6WcBVQZNOE5WFwDyxDOh
+vfpjxZRXJqb369s2x3Y2i2vQj5d6VvDwC7kCDQRe1akDARAAtRQAIMgrd7LHiQph
+AEiWBcj2PZ1HaSLczcMA+GaRi1ZDoYJyLDvxiiYRmhEeOqlZfpYQSLqOq26HSIRH
+TF3g4/m7Xhocotge6XDG3wETnLjZJqRorEd4kjcJLqyLEi466xw6oti2TKyeqHst
+cbTwa6RkibHz5FlsYjwVmoKsQFExL9HOufgpnYF2C2lQ1RJinnaMA5a8mhIRjFOT
+pIQv3Bmaq2X8CrO3kAmM1rxh7XZ7j/mv73j0AqTqIiBKZcWe6K9+3Lf0ErVuC0lW
+Z/ewxVMSZmrbS1G29uH3muh4Je9Zo/XymverLxaXXEgbZx+M7883Ee3Xd/+wKZoT
+xSaOBMiCAHTi97KE59ma5StQSyN3KGu78AKSmcgauJUiorm+7eGDiS6ZH16E6pC5
+5A3idFVmNRwDH+UQCzDjj2bPJvlB7R284HYh8kbPV3vdQ2hA+RXOqEnO47utIwn7
+aAn1Z4PpCwsObVZ2udYwJV+QXzfkvrsToU1srZygK3J0vJ//3z9iHKhyXpphG5nk
+2B0CG0cXYZiCZEEQVkCG+DTOHcUk9NBEh5BCB3xWjWplT8A4ofKIdITELFX/Cbzb
+Fg3EvxUjIdP7s/V6M5GsBmmrgLsVDjLX66xLihSIanjG3MXYhHJhG23HMdMiJBHs
+DnYIL2EaAHTIp1+pa2jybr+4k3UAEQEAAYkCPAQYAQgAJhYhBGA7OXteIHV9GL5s
+kfO5Zaaxktq3BQJe1akDAhsMBQkDwmcAAAoJEPO5Zaaxktq3BYoQALbKlXz7dU+7
+ewTIFIicVN+a1nFW2tdCZD9tAQKc1Uh6Q2YbQvbEbuCWVEuB2NF2UbXMHrgmjc68
+HtZOkHRLd2yt5BxMD4ruyaDCiG9TUlORIhAh1qdw84UtiM7C6IFptIXz0VDLOhko
+YYowv4mgaccx9kdEGel6gUb24oJuL7lv5V/2R17IVvIcpomeZwqTMraUUGxYsSdy
+eckmtWjhbCLT9FqsNBEsjwvP8Md9EgpEaRvnNgxaKKNIuzMLGrR+zez9vPTv4u48
+Q1EgPU7DJxoKmpGkzs72PfXAmiRY+Qw5SeAy8r9axdq3YV+Ylf6TSfPG9DjeFXK5
+xytOt0ffyJzjbajrY0tlgNXKsa1Q5qB7tuYvT54L5KU8qcn57An7qcJZ3U/36G2d
+HwIA+HyYEXWJhRZyCFB3qU8ux72yoJh1RQTgX4LfCwQJR4D78KuSK9EICM1UHPVZ
+A1z6G50/IUJucv5lhZRlraH0p/FQ23PtdflnOvz0Qzge/0wcoEzlSvZDSOf0ExD/
+ZT6JtXJZhMd39lNhESsu0caOhTJ97l2wWeVkP804/ftst9k8M6Z2OUL8n+rv6kgq
+VIKHuMr6Y25hGISQ/8FK8kGB3nu5VDNZ0aLsaZdq5izIBHADT6WN3dlIMrk66kzq
+VcSdqZkA/iKbC7Fv6xS7fTmhnLmAJ40h
+=Ja6V
+-----END PGP PUBLIC KEY BLOCK-----
diff --git a/README.md b/README.md
index ec1cd94..e5c91a4 100644
--- a/README.md
+++ b/README.md
@@ -62,15 +62,15 @@
 
 To run a job (defined in a properties file):
 
-    ./gradlew samza-shell:runJob -PconfigPath=file:///path/to/job/config.properties
+    ./gradlew samza-shell:runJob -PconfigPath=/path/to/job/config.properties
 
 To inspect a job's latest checkpoint:
 
-    ./gradlew samza-shell:checkpointTool -PconfigPath=file:///path/to/job/config.properties
+    ./gradlew samza-shell:checkpointTool -PconfigPath=/path/to/job/config.properties
 
 To modify a job's checkpoint (assumes that the job is not currently running), give it a file with the new offset for each partition, in the format `systems.<system>.streams.<topic>.partitions.<partition>=<offset>`:
 
-    ./gradlew samza-shell:checkpointTool -PconfigPath=file:///path/to/job/config.properties \
+    ./gradlew samza-shell:checkpointTool -PconfigPath=/path/to/job/config.properties \
         -PnewOffsets=file:///path/to/new/offsets.properties
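+
+For example (with hypothetical system, topic, and offset values), the offsets file might contain:
+
+    systems.kafka.streams.PageViewEvent.partitions.0=0
+    systems.kafka.streams.PageViewEvent.partitions.1=0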
 
 ### Developers
diff --git a/bin/setup-int-test.sh b/bin/setup-int-test.sh
index 130be5d..d0fa832 100755
--- a/bin/setup-int-test.sh
+++ b/bin/setup-int-test.sh
@@ -43,7 +43,7 @@
 # Start the jobs
 for job in checker joiner emitter watcher
 do
-    $SAMZA_DIR/bin/run-job.sh --config job.config.loader.factory=org.apache.samza.config.loaders.PropertiesConfigLoaderFactory --config job.config.loader.properties.path=$SAMZA_DIR/config/join/$job.samza --config job.foo=$job
+    $SAMZA_DIR/bin/run-app.sh --config-path=$SAMZA_DIR/config/join/$job.samza --config job.foo=$job
 done
 
 
diff --git a/build.gradle b/build.gradle
index 47e134e..e30776a 100644
--- a/build.gradle
+++ b/build.gradle
@@ -119,6 +119,12 @@
   apply plugin: 'eclipse'
   apply plugin: 'project-report'
   apply plugin: 'jacoco'
+  apply plugin: 'checkstyle'
+
+  checkstyle {
+    configFile = new File(rootDir, "checkstyle/checkstyle.xml")
+    toolVersion = "$checkstyleVersion"
+  }
 
   tasks.withType(ScalaCompile) {
     // show compile errors in console output
@@ -139,7 +145,6 @@
 }
 
 project(':samza-api') {
-  apply plugin: 'checkstyle'
   apply plugin: 'java'
 
   dependencies {
@@ -152,15 +157,10 @@
     testCompile "junit:junit:$junitVersion"
     testCompile "org.mockito:mockito-core:$mockitoVersion"
   }
-  checkstyle {
-    configFile = new File(rootDir, "checkstyle/checkstyle.xml")
-    toolVersion = "$checkstyleVersion"
-  }
 }
 
 project(":samza-core_$scalaSuffix") {
   apply plugin: 'scala'
-  apply plugin: 'checkstyle'
 
   // Force scala joint compilation
   sourceSets.main.scala.srcDir "src/main/java"
@@ -207,11 +207,6 @@
     testRuntime "org.apache.logging.log4j:log4j-slf4j-impl:$log4j2Version"
   }
 
-  checkstyle {
-    configFile = new File(rootDir, "checkstyle/checkstyle.xml")
-    toolVersion = "$checkstyleVersion"
-  }
-
   test {
     // some unit tests use embedded zookeeper, so adding some extra memory for those
     maxHeapSize = "1560m"
@@ -221,7 +216,6 @@
 
 project(":samza-azure_$scalaSuffix") {
   apply plugin: 'java'
-  apply plugin: 'checkstyle'
 
   dependencies {
     compile "com.azure:azure-storage-blob:12.0.1"
@@ -239,15 +233,10 @@
     testCompile "org.powermock:powermock-core:$powerMockVersion"
     testCompile "org.powermock:powermock-module-junit4:$powerMockVersion"
   }
-  checkstyle {
-    configFile = new File(rootDir, "checkstyle/checkstyle.xml")
-    toolVersion = "$checkstyleVersion"
-  }
 }
 
 project(":samza-aws_$scalaSuffix") {
   apply plugin: 'java'
-  apply plugin: 'checkstyle'
 
   dependencies {
     compile "com.amazonaws:aws-java-sdk-kinesis:1.11.152"
@@ -269,11 +258,6 @@
       url "https://repo1.maven.org/maven2/"
     }
   }
-
-  checkstyle {
-    configFile = new File(rootDir, "checkstyle/checkstyle.xml")
-    toolVersion = "$checkstyleVersion"
-  }
 }
 
 project(":samza-elasticsearch_$scalaSuffix") {
@@ -308,6 +292,7 @@
     compile project(":samza-kv-rocksdb_$scalaSuffix")
     compile "org.apache.avro:avro:$avroVersion"
     compile "org.apache.calcite:calcite-core:$calciteVersion"
+    compile "org.codehaus.janino:commons-compiler:3.0.11"
     compile "org.slf4j:slf4j-api:$slf4jVersion"
     compile "org.reflections:reflections:0.9.10"
 
@@ -436,7 +421,6 @@
 
 project(":samza-log4j_$scalaSuffix") {
   apply plugin: 'java'
-  apply plugin: 'checkstyle'
 
   dependencies {
     compile "log4j:log4j:$log4jVersion"
@@ -446,16 +430,10 @@
     compile "org.codehaus.jackson:jackson-mapper-asl:$jacksonVersion"
     testCompile "junit:junit:$junitVersion"
   }
-
-  checkstyle {
-    configFile = new File(rootDir, "checkstyle/checkstyle.xml")
-    toolVersion = "$checkstyleVersion"
-  }
 }
 
 project(":samza-log4j2_$scalaSuffix") {
   apply plugin: 'java'
-  apply plugin: 'checkstyle'
 
   dependencies {
     compile "org.apache.logging.log4j:log4j-1.2-api:$log4j2Version"
@@ -467,11 +445,6 @@
     compile "org.codehaus.jackson:jackson-mapper-asl:$jacksonVersion"
     testCompile "junit:junit:$junitVersion"
   }
-
-  checkstyle {
-    configFile = new File(rootDir, "checkstyle/checkstyle.xml")
-    toolVersion = "$checkstyleVersion"
-  }
 }
 
 project(":samza-yarn_$scalaSuffix") {
@@ -574,40 +547,45 @@
   }
 
   // Usage: ./gradlew samza-shell:runJob \
-  //  -PconfigPath=file:///path/to/job/config.properties
+  //  -PconfigPath=/path/to/job/config.properties
   task runJob(type:JavaExec) {
     description 'To run a job (defined in a properties file)'
     main = 'org.apache.samza.job.JobRunner'
     classpath = configurations.gradleShell
-    if (project.hasProperty('configPath')) args += ['--config-path', configPath]
+    if (project.hasProperty('configPath')) args += [
+        '--config', 'job.config.loader.factory=org.apache.samza.config.loaders.PropertiesConfigLoaderFactory',
+        '--config', 'job.config.loader.properties.path=' + configPath]
     jvmArgs = ["-Dlog4j.configurationFile=file:src/main/resources/log4j2-console.xml"]
   }
 
   // Usage: ./gradlew samza-shell:checkpointTool \
-  //  -PconfigPath=file:///path/to/job/config.properties -PnewOffsets=file:///path/to/new/offsets.properties
+  //  -PconfigPath=/path/to/job/config.properties -PnewOffsets=/path/to/new/offsets.properties
   task checkpointTool(type:JavaExec) {
     description 'Command-line tool to inspect and manipulate the job’s checkpoint'
     main = 'org.apache.samza.checkpoint.CheckpointTool'
     classpath = configurations.gradleShell
-    if (project.hasProperty('configPath')) args += ['--config-path', configPath]
+    if (project.hasProperty('configPath')) args += [
+        '--config', 'job.config.loader.factory=org.apache.samza.config.loaders.PropertiesConfigLoaderFactory',
+        '--config', 'job.config.loader.properties.path=' + configPath]
     if (project.hasProperty('newOffsets')) args += ['--new-offsets', newOffsets]
     jvmArgs = ["-Dlog4j.configurationFile=file:src/main/resources/log4j2-console.xml"]
   }
 
   // Usage: ./gradlew samza-shell:kvPerformanceTest
-  //  -PconfigPath=file:///path/to/job/config.properties
+  //  -PconfigPath=/path/to/job/config.properties
   task kvPerformanceTest(type:JavaExec) {
     description 'Command-line tool to run key-value performance tests'
     main = 'org.apache.samza.test.performance.TestKeyValuePerformance'
     classpath = configurations.gradleShell
-    if (project.hasProperty('configPath')) args += ['--config-path', configPath]
+    if (project.hasProperty('configPath')) args += [
+        '--config', 'job.config.loader.factory=org.apache.samza.config.loaders.PropertiesConfigLoaderFactory',
+        '--config', 'job.config.loader.properties.path=' + configPath]
     jvmArgs = ["-Dlog4j.configurationFile=file:src/main/resources/log4j2-console.xml"]
   }
 }
 
 project(":samza-kv_$scalaSuffix") {
   apply plugin: 'scala'
-  apply plugin: 'checkstyle'
 
   // Force scala joint compilation
   sourceSets.main.scala.srcDir "src/main/java"
@@ -623,35 +601,22 @@
     compile project(':samza-api')
     compile project(":samza-core_$scalaSuffix")
     compile "org.scala-lang:scala-library:$scalaVersion"
+    testCompile "com.google.guava:guava:$guavaVersion"
     testCompile "junit:junit:$junitVersion"
     testCompile "org.mockito:mockito-core:$mockitoVersion"
   }
-
-  checkstyle {
-    configFile = new File(rootDir, "checkstyle/checkstyle.xml")
-    toolVersion = "$checkstyleVersion"
-  }
 }
 
 project(":samza-kv-inmemory_$scalaSuffix") {
-  apply plugin: 'scala'
-
-  // Force scala joint compilation
-  sourceSets.main.scala.srcDir "src/main/java"
-  sourceSets.test.scala.srcDir "src/test/java"
-
-  // Disable the Javac compiler by forcing joint compilation by scalac. This is equivalent to setting
-  // tasks.compileTestJava.enabled = false
-  sourceSets.main.java.srcDirs = []
-  sourceSets.test.java.srcDirs = []
+  apply plugin: 'java'
 
   dependencies {
     compile project(':samza-api')
     compile project(":samza-core_$scalaSuffix")
     compile project(":samza-kv_$scalaSuffix")
-    compile "org.scala-lang:scala-library:$scalaVersion"
     compile "com.google.guava:guava:$guavaVersion"
     testCompile "junit:junit:$junitVersion"
+    testCompile "org.mockito:mockito-core:$mockitoVersion"
   }
 }
 
@@ -679,29 +644,17 @@
 }
 
 project(":samza-kv-couchbase_$scalaSuffix") {
-  apply plugin: 'scala'
-
-  // Force scala joint compilation
-  sourceSets.main.scala.srcDir "src/main/java"
-  sourceSets.test.scala.srcDir "src/test/java"
-
-  // Disable the Javac compiler by forcing joint compilation by scalac. This is equivalent to setting
-  // tasks.compileTestJava.enabled = false
-  sourceSets.main.java.srcDirs = []
-  sourceSets.test.java.srcDirs = []
+  apply plugin: 'java'
 
   dependencies {
     compile project(':samza-api')
     compile project(":samza-core_$scalaSuffix")
-    compile project(":samza-kv_$scalaSuffix")
-    compile "org.scala-lang:scala-library:$scalaVersion"
     compile "com.couchbase.client:java-client:$couchbaseClientVersion"
     testCompile "junit:junit:$junitVersion"
     testCompile "org.mockito:mockito-core:$mockitoVersion"
     testCompile "org.powermock:powermock-api-mockito:$powerMockVersion"
     testCompile "org.powermock:powermock-core:$powerMockVersion"
     testCompile "org.powermock:powermock-module-junit4:$powerMockVersion"
-    testCompile "org.scalatest:scalatest_$scalaSuffix:$scalaTestVersion"
   }
 }
 
@@ -796,7 +749,6 @@
 
 project(":samza-test_$scalaSuffix") {
   apply plugin: 'scala'
-  apply plugin: 'checkstyle'
 
   // Force scala joint compilation
   sourceSets.main.scala.srcDir "src/main/java"
@@ -857,11 +809,6 @@
     jvmArgs = ["-XX:+UseConcMarkSweepGC", "-server"]
   }
 
-  checkstyle {
-    configFile = new File(rootDir, "checkstyle/checkstyle.xml")
-    toolVersion = "$checkstyleVersion"
-  }
-
   tasks.create(name: "releaseTestJobs", dependsOn: configurations.archives.artifacts, type: Tar) {
     description 'Build an integration test tarball'
     compression = Compression.GZIP
diff --git a/checkstyle/checkstyle-suppressions.xml b/checkstyle/checkstyle-suppressions.xml
index 5109c7e..e24bc88 100644
--- a/checkstyle/checkstyle-suppressions.xml
+++ b/checkstyle/checkstyle-suppressions.xml
@@ -1,8 +1,8 @@
 <?xml version="1.0"?>
 
 <!DOCTYPE suppressions PUBLIC
-        "-//Checkstyle//DTD SuppressionFilter Configuration 1.2//EN"
-        "https://checkstyle.org/dtds/suppressions_1_2.dtd">
+    "-//Puppy Crawl//DTD Suppressions 1.1//EN"
+    "http://www.puppycrawl.com/dtds/suppressions_1_1.dtd">
 
 <!--
      // Licensed to the Apache Software Foundation (ASF) under one or more
@@ -25,7 +25,21 @@
   <suppress checks="ConstantName"
             files="ApplicationStatus.java"
             lines="26-29"/>
+  <!--
+    API javadocs (including descriptors) may reference classes (using '{@link <className>}') which
+    aren't directly used in the API class. In order to help the API javadoc look cleaner, we can use
+    the simple class name in the javadoc and then import that class. However, for non-API code, we
+    want checkstyle prevent an import just for documentation purposes. The
+    "preventJavadocUnusedImports" id includes processing of javadocs when checking unused imports.
+    We will apply that to the API classes only.
+  -->
   <suppress id="preventJavadocUnusedImports"
             files=".*samza-api.*"/>
+  <suppress id="preventJavadocUnusedImports"
+            files="samza-kafka/src/main/java/org/apache/samza/system/kafka/descriptors" />
+
+  <!-- suppress avro schema classes since they are based on auto-generated code -->
+  <suppress files="samza-sql/src/test/java/org/apache/samza/sql/avro/schemas" checks=".*" />
+  <suppress files="samza-tools/src/main/java/org/apache/samza/tools/schemas" checks=".*" />
 </suppressions>
 
diff --git a/checkstyle/checkstyle.xml b/checkstyle/checkstyle.xml
index e6d789c..ee493e6 100644
--- a/checkstyle/checkstyle.xml
+++ b/checkstyle/checkstyle.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <!DOCTYPE module PUBLIC
     "-//Puppy Crawl//DTD Check Configuration 1.3//EN"
-     "http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
+    "http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
 <!--
 // Licensed to the Apache Software Foundation (ASF) under one or more
 // contributor license agreements.  See the NOTICE file distributed with
diff --git a/docs/_blog/2020-07-01-announcing-the-release-of-apache-samza--1.5.0.md b/docs/_blog/2020-07-01-announcing-the-release-of-apache-samza--1.5.0.md
new file mode 100644
index 0000000..ca1a502
--- /dev/null
+++ b/docs/_blog/2020-07-01-announcing-the-release-of-apache-samza--1.5.0.md
@@ -0,0 +1,144 @@
+---
+layout: blog
+title: Announcing the release of Apache Samza 1.5.0
+icon: git-pull-request
+authors:
+    - name: Bharath Kumarasubramanian
+      website:
+      image:
+excerpt_separator: <!--more-->
+---
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+# **Announcing the release of Apache Samza 1.5.0**
+
+
+<!--more-->
+
+**IMPORTANT NOTE**: As noted in the last release, this release contains **backward-incompatible changes regarding Samza job submission**. Details can be found in [SEP-23: Simplify Job Runner](https://cwiki.apache.org/confluence/display/SAMZA/SEP-23%3A+Simplify+Job+Runner).
+
+We are thrilled to announce the release of Apache Samza 1.5.0.
+
+Today, Samza forms the backbone of hundreds of real-time production applications across a multitude of companies, such as LinkedIn, Slack, and Redfin, among many others. Samza provides leading support for large-scale stateful stream processing with:
+
+* First-class support for local state (with RocksDB store). This allows a stateful application to scale up to 1.1 million events/sec on a single machine with SSD.
+
+* Support for incremental checkpointing of state instead of full snapshots. This enables Samza to scale to applications with very large state.
+
+* A fully asynchronous programming model that makes parallelizing remote calls efficient and effortless.
+
+* A high-level API for expressing complex stream processing pipelines in a few lines of code.
+
+* A Beam Samza Runner that marries Beam’s best-in-class support for event-time based windowed processing and sophisticated triggering with Samza’s stable and scalable stateful processing model.
+
+* A fully pluggable model for input sources (e.g. Kafka, Kinesis, DynamoDB streams, etc.) and output systems (HDFS, Kafka, Elasticsearch, etc.).
+
+* A Table API that provides a common abstraction for accessing remote or local databases and allows developers to “join” an input event stream with such a Table.
+
+* A flexible deployment model for running applications in any hosting environment and with cluster managers other than YARN.
+
+### New Features, Upgrades and Bug Fixes:
+This release brings the following features, upgrades, and capabilities (highlights):
+
+#### Samza Container Placement
+The Container Placement API gives you the ability to move or restart one or more containers (either active or standby) of your cluster-based applications from one host to another without restarting your application. You can use this API to build maintenance, balancing, and remediation tools.
+
+#### Simplify Job Runner & Configs
+Job Runner now simply submits the Samza job to the YARN RM without executing any user code, and job planning happens on the ClusterBasedJobCoordinator instead. This simplified workflow addresses security requirements where job submission must be isolated from user code execution, as well as operational pain points where deployment failures could occur in multiple places.
+
+The full list of JIRAs addressed in this release can be found [here](https://issues.apache.org/jira/issues/?jql=project%20%3D%20SAMZA%20and%20fixVersion%20in%20(1.5)).
+
+### Upgrading your application to Apache Samza 1.5.0
+ConfigFactory is deprecated since Job Runner no longer loads the full job config. Instead, ConfigLoaderFactory is introduced and is executed on the ClusterBasedJobCoordinator to fetch the full job config.
+If you are using the default PropertiesConfigFactory, simply switching to the default PropertiesConfigLoaderFactory will work; if you are using a custom ConfigFactory, please create its new counterpart following ConfigLoaderFactory.
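+
+As a minimal sketch, the default loader can be configured with the following two properties (passed via --config flags or the submission properties file; the path below is a placeholder):
+{% highlight jproperties %}
+job.config.loader.factory=org.apache.samza.config.loaders.PropertiesConfigLoaderFactory
+job.config.loader.properties.path=/path/to/job/config.properties
+{% endhighlight %}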
+
+Configs related to job submission must be explicitly provided to Job Runner, as it no longer loads the full job config. These configs include:
+
+* Configs directly related to job submission, such as yarn.package.path, job.name, etc.
+* Configs needed by the config loader on the AM to fetch the job config, such as the path to the properties file in the tarball; all such configs have a job.config.loader.properties prefix.
+* Configs that users would like to override
+
+The full list of job submission configurations can be found [here](https://cwiki.apache.org/confluence/display/SAMZA/SEP-23%3A+Simplify+Job+Runner#SEP23:SimplifyJobRunner-References).
+
+#### Usage Instructions
+For example, a job submission of the form
+{% highlight bash %}
+deploy/samza/bin/run-app.sh
+ --config yarn.package.path=<package_path>
+ --config job.name=<job_name>
+{% endhighlight %}
+can be simplified to
+{% highlight bash %}
+deploy/samza/bin/run-app.sh
+ --config-path=/path/to/submission/properties/file/submission.properties
+{% endhighlight %}
+where submission.properties contains
+{% highlight jproperties %}
+yarn.package.path=<package_path>
+job.name=<job_name>
+{% endhighlight %}
+
+#### Rollback Instructions
+In case of a problem with Samza 1.5, users can roll back to Samza 1.4 and keep the old start-up flow using _config-path_ and _config-factory_.
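+
+For reference, a sketch of the old Samza 1.4 start-up flow (the config path below is a placeholder):
+{% highlight bash %}
+deploy/samza/bin/run-app.sh \
+ --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory \
+ --config-path=file:///path/to/job/config.properties
+{% endhighlight %}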
+
+### Simplify Job Runner & Configs
+[SAMZA-2488](https://issues.apache.org/jira/browse/SAMZA-2488) Add JobCoordinatorLaunchUtil to handle common logic when launching job coordinator
+
+[SAMZA-2471](https://issues.apache.org/jira/browse/SAMZA-2471) Simplify CommandLine
+
+[SAMZA-2458](https://issues.apache.org/jira/browse/SAMZA-2458) Update ProcessJobFactory and ThreadJobFactory to load full job config
+
+[SAMZA-2453](https://issues.apache.org/jira/browse/SAMZA-2453) Update ClusterBasedJobCoordinator to support Beam jobs
+
+[SAMZA-2441](https://issues.apache.org/jira/browse/SAMZA-2441) Update ApplicationRunnerMain#ApplicationRunnerCommandLine not to load local file
+
+[SAMZA-2420](https://issues.apache.org/jira/browse/SAMZA-2420) Update CommandLine to use config loader for local config file
+
+### Container Placement API
+[SAMZA-2402](https://issues.apache.org/jira/browse/SAMZA-2402) Tie Container placement service and Container placement handler and validate placement requests
+
+[SAMZA-2379](https://issues.apache.org/jira/browse/SAMZA-2379) Support Container Placements for job running in degraded state
+
+[SAMZA-2378](https://issues.apache.org/jira/browse/SAMZA-2378) Container Placements support for Standby containers enabled jobs
+
+
+### Bug Fixes
+[SAMZA-2515](https://issues.apache.org/jira/browse/SAMZA-2515) Thread safety for Kafka consumer in KafkaConsumerProxy
+
+[SAMZA-2511](https://issues.apache.org/jira/browse/SAMZA-2511) Handle container-stop-fail in case of standby container failover
+
+[SAMZA-2510](https://issues.apache.org/jira/browse/SAMZA-2510) Incorrect shutdown status due to race between runloop thread and process callback thread
+
+[SAMZA-2506](https://issues.apache.org/jira/browse/SAMZA-2506) Inconsistent end of stream semantics in SystemStreamPartitionMetadata
+
+[SAMZA-2464](https://issues.apache.org/jira/browse/SAMZA-2464) Container shuts down when task fails to remove old state checkpoint dirs
+
+[SAMZA-2468](https://issues.apache.org/jira/browse/SAMZA-2468) Standby container needs to respond to shutdown request
+
+### Other Improvements
+[SAMZA-2519](https://issues.apache.org/jira/browse/SAMZA-2519) Support duplicate timer registration
+
+[SAMZA-2508](https://issues.apache.org/jira/browse/SAMZA-2508) Use cytodynamics classloader to launch job container
+
+[SAMZA-2478](https://issues.apache.org/jira/browse/SAMZA-2478) Add new metrics to track key and value size of records written to RocksDb
+
+[SAMZA-2462](https://issues.apache.org/jira/browse/SAMZA-2462) Adding metric for container thread pool size
+
+### Source downloads
+A source download of Samza 1.5.0 is available [here](https://dist.apache.org/repos/dist/release/samza/1.5.0/), and it is also available in Apache’s Maven repository. See Samza’s download [page](https://samza.apache.org/startup/download/) for details and Samza’s feature preview for new features.
diff --git a/docs/_config.yml b/docs/_config.yml
index da3f72c..c62607f 100644
--- a/docs/_config.yml
+++ b/docs/_config.yml
@@ -25,7 +25,7 @@
 baseurl: http://samza.apache.org
 version: latest
 # this is the version you will go if you click 'switch version' in "latest" pages.
-latest-release: '1.4.0'
+latest-release: '1.5.0'
 collections:
   menu:
     output: false
diff --git a/docs/_menu/index.html b/docs/_menu/index.html
index dc74b8f..243bc71 100644
--- a/docs/_menu/index.html
+++ b/docs/_menu/index.html
@@ -12,6 +12,8 @@
     items_attributes: 'data-documentation="/learn/documentation/version/"'
   - menu_title: Releases
     items:
+      - menu_title: 1.5.0
+        url: '/releases/1.5.0'
       - menu_title: 1.4.0
         url: '/releases/1.4.0'
       - menu_title: 1.3.1
diff --git a/docs/_releases/1.5.0.md b/docs/_releases/1.5.0.md
new file mode 100644
index 0000000..614a86f
--- /dev/null
+++ b/docs/_releases/1.5.0.md
@@ -0,0 +1,135 @@
+---
+version: '1.5.0'
+order: 150
+layout: page
+menu_title: '1.5'
+title: Apache Samza 1.5 <a href="/learn/documentation/1.5.0/">      [Docs] </a>
+---
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+**IMPORTANT NOTE**: As noted in the last release, this release contains **backward-incompatible changes regarding Samza job submission**. Details can be found in [SEP-23: Simplify Job Runner](https://cwiki.apache.org/confluence/display/SAMZA/SEP-23%3A+Simplify+Job+Runner).
+
+We are thrilled to announce the release of Apache Samza 1.5.0.
+
+Today, Samza forms the backbone of hundreds of real-time production applications across a multitude of companies, such as LinkedIn, Slack, and Redfin, among many others. Samza provides leading support for large-scale stateful stream processing with:
+
+* First-class support for local state (with RocksDB store). This allows a stateful application to scale up to 1.1 million events/sec on a single machine with SSD.
+
+* Support for incremental checkpointing of state instead of full snapshots. This enables Samza to scale to applications with very large state.
+
+* A fully asynchronous programming model that makes parallelizing remote calls efficient and effortless.
+
+* A high-level API for expressing complex stream processing pipelines in a few lines of code.
+
+* A Beam Samza Runner that marries Beam’s best-in-class support for event-time based windowed processing and sophisticated triggering with Samza’s stable and scalable stateful processing model.
+
+* A fully pluggable model for input sources (e.g. Kafka, Kinesis, DynamoDB streams, etc.) and output systems (HDFS, Kafka, Elasticsearch, etc.).
+
+* A Table API that provides a common abstraction for accessing remote or local databases and allows developers to “join” an input event stream with such a Table.
+
+* A flexible deployment model for running applications in any hosting environment and with cluster managers other than YARN.
+
+### New Features, Upgrades and Bug Fixes:
+This release brings the following features, upgrades, and capabilities (highlights):
+
+#### Samza Container Placement
+The Container Placement API gives you the ability to move or restart one or more containers (either active or standby) of your cluster-based applications from one host to another without restarting your application. You can use this API to build maintenance, balancing, and remediation tools.
+
+#### Simplify Job Runner & Configs
+Job Runner now simply submits the Samza job to the YARN RM without executing any user code, and job planning happens on the ClusterBasedJobCoordinator instead. This simplified workflow addresses security requirements where job submission must be isolated from user code execution, as well as operational pain points where deployment failures could occur in multiple places.
+
+The full list of JIRAs addressed in this release can be found [here](https://issues.apache.org/jira/issues/?jql=project%20%3D%20SAMZA%20and%20fixVersion%20in%20(1.5)).
+
+### Upgrading your application to Apache Samza 1.5.0
+ConfigFactory is deprecated since Job Runner no longer loads the full job config. Instead, ConfigLoaderFactory is introduced and is executed on the ClusterBasedJobCoordinator to fetch the full job config.
+If you are using the default PropertiesConfigFactory, simply switching to the default PropertiesConfigLoaderFactory will work; if you are using a custom ConfigFactory, please create its new counterpart following ConfigLoaderFactory.
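+
+As a minimal sketch, the default loader can be configured with the following two properties (passed via --config flags or the submission properties file; the path below is a placeholder):
+{% highlight jproperties %}
+job.config.loader.factory=org.apache.samza.config.loaders.PropertiesConfigLoaderFactory
+job.config.loader.properties.path=/path/to/job/config.properties
+{% endhighlight %}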
+
+Configs related to job submission must be explicitly provided to Job Runner, as it no longer loads the full job config. These configs include:
+
+* Configs directly related to job submission, such as yarn.package.path, job.name, etc.
+* Configs needed by the config loader on the AM to fetch the job config, such as the path to the properties file in the tarball; all such configs have a job.config.loader.properties prefix.
+* Configs that users would like to override
+
+The full list of job submission configurations can be found [here](https://cwiki.apache.org/confluence/display/SAMZA/SEP-23%3A+Simplify+Job+Runner#SEP23:SimplifyJobRunner-References).
+
+#### Usage Instructions
+For example, a job submission of the form
+{% highlight bash %}
+deploy/samza/bin/run-app.sh
+ --config yarn.package.path=<package_path>
+ --config job.name=<job_name>
+{% endhighlight %}
+can be simplified to
+{% highlight bash %}
+deploy/samza/bin/run-app.sh
+ --config-path=/path/to/submission/properties/file/submission.properties
+{% endhighlight %}
+where submission.properties contains
+{% highlight jproperties %}
+yarn.package.path=<package_path>
+job.name=<job_name>
+{% endhighlight %}
+
+#### Rollback Instructions
+In case of a problem with Samza 1.5, users can roll back to Samza 1.4 and keep the old start-up flow using _config-path_ and _config-factory_.
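+
+For reference, a sketch of the old Samza 1.4 start-up flow (the config path below is a placeholder):
+{% highlight bash %}
+deploy/samza/bin/run-app.sh \
+ --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory \
+ --config-path=file:///path/to/job/config.properties
+{% endhighlight %}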
+
+### Simplify Job Runner
+[SAMZA-2488](https://issues.apache.org/jira/browse/SAMZA-2488) Add JobCoordinatorLaunchUtil to handle common logic when launching job coordinator
+
+[SAMZA-2471](https://issues.apache.org/jira/browse/SAMZA-2471) Simplify CommandLine
+
+[SAMZA-2458](https://issues.apache.org/jira/browse/SAMZA-2458) Update ProcessJobFactory and ThreadJobFactory to load full job config
+
+[SAMZA-2453](https://issues.apache.org/jira/browse/SAMZA-2453) Update ClusterBasedJobCoordinator to support Beam jobs
+
+[SAMZA-2441](https://issues.apache.org/jira/browse/SAMZA-2441) Update ApplicationRunnerMain#ApplicationRunnerCommandLine not to load local file
+
+[SAMZA-2420](https://issues.apache.org/jira/browse/SAMZA-2420) Update CommandLine to use config loader for local config file
+
+### Container Placement API
+[SAMZA-2402](https://issues.apache.org/jira/browse/SAMZA-2402) Tie Container placement service and Container placement handler and validate placement requests
+
+[SAMZA-2379](https://issues.apache.org/jira/browse/SAMZA-2379) Support Container Placements for job running in degraded state
+
+[SAMZA-2378](https://issues.apache.org/jira/browse/SAMZA-2378) Container Placements support for Standby containers enabled jobs
+
+
+### Bug Fixes
+[SAMZA-2515](https://issues.apache.org/jira/browse/SAMZA-2515) Thread safety for Kafka consumer in KafkaConsumerProxy
+
+[SAMZA-2511](https://issues.apache.org/jira/browse/SAMZA-2511) Handle container-stop-fail in case of standby container failover
+
+[SAMZA-2510](https://issues.apache.org/jira/browse/SAMZA-2510) Incorrect shutdown status due to race between runloop thread and process callback thread
+
+[SAMZA-2506](https://issues.apache.org/jira/browse/SAMZA-2506) Inconsistent end of stream semantics in SystemStreamPartitionMetadata
+
+[SAMZA-2464](https://issues.apache.org/jira/browse/SAMZA-2464) Container shuts down when task fails to remove old state checkpoint dirs
+
+[SAMZA-2468](https://issues.apache.org/jira/browse/SAMZA-2468) Standby container needs to respond to shutdown request
+
+### Other Improvements
+[SAMZA-2519](https://issues.apache.org/jira/browse/SAMZA-2519) Support duplicate timer registration
+
+[SAMZA-2508](https://issues.apache.org/jira/browse/SAMZA-2508) Use cytodynamics classloader to launch job container
+
+[SAMZA-2478](https://issues.apache.org/jira/browse/SAMZA-2478) Add new metrics to track key and value size of records written to RocksDb
+
+[SAMZA-2462](https://issues.apache.org/jira/browse/SAMZA-2462) Adding metric for container thread pool size
+
+### Source downloads
+A source download of Samza 1.5.0 is available [here](https://dist.apache.org/repos/dist/release/samza/1.5.0/), and it is also available in Apache’s Maven repository. See Samza’s download [page](https://samza.apache.org/startup/download/) for details and Samza’s feature preview for new features.
diff --git a/docs/archive/index.html b/docs/archive/index.html
index 46dc25a..11fe092 100644
--- a/docs/archive/index.html
+++ b/docs/archive/index.html
@@ -27,6 +27,14 @@
   <li><a href="../startup/hello-samza/latest">Hello Samza</a></li>
 </ul>
 
+<h4 id="1.5">1.5 Release</h4>
+
+<ul class="documentation-list">
+    <li><a href="../learn/documentation/1.5.0">Documentation</a></li>
+    <li><a href="../learn/tutorials/1.5.0">Tutorials</a></li>
+    <li><a href="../startup/hello-samza/1.5.0">Hello Samza</a></li>
+</ul>
+
 <h4 id="1.4">1.4 Release</h4>
 
 <ul class="documentation-list">
diff --git a/docs/learn/documentation/versioned/container/checkpointing.md b/docs/learn/documentation/versioned/container/checkpointing.md
index 9fb7e6d..a829b1f 100644
--- a/docs/learn/documentation/versioned/container/checkpointing.md
+++ b/docs/learn/documentation/versioned/container/checkpointing.md
@@ -108,15 +108,15 @@
 
 {% highlight bash %}
 samza-example/target/bin/checkpoint-tool.sh \
-  --config-path=file:///path/to/job/config.properties
+  --config-path=/path/to/job/config.properties
 {% endhighlight %}
 
 This command prints out the latest checkpoint in a properties file format. You can save the output to a file, and edit it as you wish. For example, to jump back to the oldest possible point in time, you can set all the offsets to 0. Then you can feed that properties file back into checkpoint-tool.sh and save the modified checkpoint:
 
 {% highlight bash %}
 samza-example/target/bin/checkpoint-tool.sh \
-  --config-path=file:///path/to/job/config.properties \
-  --new-offsets=file:///path/to/new/offsets.properties
+  --config-path=/path/to/job/config.properties \
+  --new-offsets=/path/to/new/offsets.properties
 {% endhighlight %}
 
 Note that Samza only reads checkpoints on container startup. In order for your checkpoint change to take effect, you need to first stop the job, then save the modified offsets, and then start the job again. If you write a checkpoint while the job is running, it will most likely have no effect.
diff --git a/docs/learn/documentation/versioned/container/coordinator-stream.md b/docs/learn/documentation/versioned/container/coordinator-stream.md
index 6b14960..8d528d6 100644
--- a/docs/learn/documentation/versioned/container/coordinator-stream.md
+++ b/docs/learn/documentation/versioned/container/coordinator-stream.md
@@ -114,7 +114,7 @@
 Samza provides a command line tool to write Job Configuration messages to the coordinator stream. The tool can be used as follows:
 {% highlight bash %}
 samza-example/target/bin/run-coordinator-stream-writer.sh \
-  --config-path=file:///path/to/job/config.properties \
+  --config-path=/path/to/job/config.properties \
   --type set-config \
   --key job.container.count \
   --value 8
diff --git a/docs/learn/documentation/versioned/container/state-management.md b/docs/learn/documentation/versioned/container/state-management.md
index 86b0d44..4371541 100644
--- a/docs/learn/documentation/versioned/container/state-management.md
+++ b/docs/learn/documentation/versioned/container/state-management.md
@@ -192,7 +192,7 @@
 
 {% highlight bash %}
 samza-example/target/bin/state-storage-tool.sh \
-  --config-path=file:///path/to/job/config.properties \
+  --config-path=/path/to/job/config.properties \
   --path=directory/to/put/state/stores
 {% endhighlight %}
 
@@ -202,7 +202,7 @@
 
 {% highlight bash %}
 samza-example/target/bin/read-rocksdb-tool.sh \
-  --config-path=file:///path/to/job/config.properties \
+  --config-path=/path/to/job/config.properties \
   --db-path=/tmp/nm-local-dir/state/test-state/Partition_0 \
   --db-name=test-state \
   --string-key=a,b,c
diff --git a/docs/learn/documentation/versioned/deployment/standalone.md b/docs/learn/documentation/versioned/deployment/standalone.md
index 6b2d168..115e3f3 100644
--- a/docs/learn/documentation/versioned/deployment/standalone.md
+++ b/docs/learn/documentation/versioned/deployment/standalone.md
@@ -72,7 +72,7 @@
 We are ready to run the example application [WikipediaZkLocalApplication](https://github.com/apache/samza-hello-samza/blob/master/src/main/java/samza/examples/wikipedia/application/WikipediaZkLocalApplication.java). This application reads messages from the wikipedia-edits topic, and calculates counts, every ten seconds, for all edits that were made during that window. It emits these results to another topic named `wikipedia-stats`.
 
 ```bash
-./deploy/samza/bin/run-class.sh samza.examples.wikipedia.application.WikipediaZkLocalApplication  --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-application-local-runner.properties
+./deploy/samza/bin/run-class.sh samza.examples.wikipedia.application.WikipediaZkLocalApplication  --config job.config.loader.factory=org.apache.samza.config.loaders.PropertiesConfigLoaderFactory --config job.config.loader.properties.path=$PWD/deploy/samza/config/wikipedia-application-local-runner.properties
 ```
 
 You can run the above command again to spin up a new instance of your application.
diff --git a/docs/learn/documentation/versioned/deployment/yarn.md b/docs/learn/documentation/versioned/deployment/yarn.md
index 3a46cea..36f88c0 100644
--- a/docs/learn/documentation/versioned/deployment/yarn.md
+++ b/docs/learn/documentation/versioned/deployment/yarn.md
@@ -97,7 +97,7 @@
 Once the archive is built, the `run-app.sh` script can be used to submit the application to YARN's resource manager. The script takes 2 CLI parameters - the config factory and the config file for the application. As an example, lets run our [FilterExample](https://github.com/apache/samza-hello-samza/blob/latest/src/main/java/samza/examples/cookbook/FilterExample.java) on YARN as follows:
 
 ```bash
-$ ./deploy/samza/bin/run-app.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path ./deploy/samza/config/filter-example.properties
+$ ./deploy/samza/bin/run-app.sh --config-path=./deploy/samza/config/filter-example.properties
 ```
 
 Congratulations, you've successfully submitted your first job to YARN! You can view the YARN Web UI to view its status. 
diff --git a/docs/learn/documentation/versioned/jobs/job-runner.md b/docs/learn/documentation/versioned/jobs/job-runner.md
deleted file mode 100644
index 5f0607f..0000000
--- a/docs/learn/documentation/versioned/jobs/job-runner.md
+++ /dev/null
@@ -1,60 +0,0 @@
----
-layout: page
-title: JobRunner
----
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-Samza jobs are started using a script called run-job.sh.
-
-{% highlight bash %}
-samza-example/target/bin/run-job.sh \
-  --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory \
-  --config-path=file://$PWD/config/hello-world.properties
-{% endhighlight %}
-
-You provide two parameters to the run-job.sh script. One is the config location, and the other is a factory class that is used to read your configuration file. The run-job.sh script is actually executing a Samza class called JobRunner. The JobRunner uses your ConfigFactory to get a Config object from the config path.
-
-{% highlight java %}
-public interface ConfigFactory {
-  Config getConfig(URI configUri);
-}
-{% endhighlight %}
-
-The Config object is just a wrapper around Map<String, String>, with some nice helper methods. Out of the box, Samza ships with the PropertiesConfigFactory, but developers can implement any kind of ConfigFactory they wish.
-
-Once the JobRunner gets your configuration, it gives your configuration to the StreamJobFactory class defined by the "job.factory" property. Samza ships with three job factory implementations: ThreadJobFactory, ProcessJobFactory and YarnJobFactory. The StreamJobFactory's responsibility is to give the JobRunner a job that it can run.
-
-{% highlight java %}
-public interface StreamJob {
-  StreamJob submit();
-
-  StreamJob kill();
-
-  ApplicationStatus waitForFinish(long timeoutMs);
-
-  ApplicationStatus waitForStatus(ApplicationStatus status, long timeoutMs);
-
-  ApplicationStatus getStatus();
-}
-{% endhighlight %}
-
-Once the JobRunner gets a job, it calls submit() on the job. This method is what tells the StreamJob implementation to start the SamzaContainer. In the case of LocalJobRunner, it uses a run-container.sh script to execute the SamzaContainer in a separate process, which will start one SamzaContainer locally on the machine that you ran run-job.sh on.
-
-This flow differs slightly when you use YARN, but we'll get to that later.
-
-## [Configuration &raquo;](configuration.html)
diff --git a/docs/learn/documentation/versioned/jobs/samza-configurations.md b/docs/learn/documentation/versioned/jobs/samza-configurations.md
index 5d4daed..932c4c8 100644
--- a/docs/learn/documentation/versioned/jobs/samza-configurations.md
+++ b/docs/learn/documentation/versioned/jobs/samza-configurations.md
@@ -271,6 +271,8 @@
 |systems.**_system-name_**.azureblob.flushTimeoutMs|180000 (3 mins)|timeout to finish uploading all blocks before committing a blob.|
 |systems.**_system-name_**.azureblob.closeTimeoutMs|300000 (5 mins)|timeout to finish committing all the blobs currently being written to. This does not include the flush timeout per blob.|
 |systems.**_system-name_**.azureblob.suffixRandomStringToBlobName|true|if true, a random string of 8 chars is suffixed to the blob name to prevent name collision when more than one Samza tasks are writing to the same SSP.|
+|systems.**_system-name_**.azureblob.metadataPropertiesGeneratorFactory|`org.apache.samza.system.`<br>`azureblob.utils.`<br>`NullBlobMetadataGeneratorFactory`|Fully qualified class name of the `org.apache.samza.system.azureblob.utils.BlobMetadataGeneratorFactory` impl for the system producer. <br><br>The default metadata generator does not add any metadata to the blob.| 
+|systems.**_system-name_**.azureblob.metadataGeneratorConfig| |Additional configs for the metadata generator should be prefixed with this string which is passed to the generator.<br>For example, to pass a "key":"value" pair to the metadata generator, add config like systems.<system-name>.azureblob.metadataGeneratorConfig.\<key\> with value \<value\>| 
 
 
 ### <a name="state-storage"></a>[4. State Storage](#state-storage)
diff --git a/docs/learn/documentation/versioned/rest/overview.md b/docs/learn/documentation/versioned/rest/overview.md
index c382f03..2872bd0 100644
--- a/docs/learn/documentation/versioned/rest/overview.md
+++ b/docs/learn/documentation/versioned/rest/overview.md
@@ -37,8 +37,8 @@
 Then from the extracted location, run the service using:
 {% highlight bash %}
 samza-example/target/bin/run-samza-rest-service.sh  \
-  --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory \
-  --config-path=file://$PWD/config/samza-rest.properties
+  --config job.config.loader.factory=org.apache.samza.config.loaders.PropertiesConfigLoaderFactory \
+  --config job.config.loader.properties.path=$PWD/config/samza-rest.properties
 {% endhighlight %}
 
 The two config parameters have the same purpose as they do for [run-job.sh](../jobs/job-runner.html).
diff --git a/docs/learn/tutorials/versioned/deploy-samza-to-CDH.md b/docs/learn/tutorials/versioned/deploy-samza-to-CDH.md
index e3b920b..f215a6a 100644
--- a/docs/learn/tutorials/versioned/deploy-samza-to-CDH.md
+++ b/docs/learn/tutorials/versioned/deploy-samza-to-CDH.md
@@ -66,5 +66,5 @@
 ### Run Samza Job
 
 {% highlight bash %}
-bin/run-job.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/config/wikipedia-parser.properties
+bin/run-app.sh --config-path=$PWD/config/wikipedia-parser.properties
 {% endhighlight %}
diff --git a/docs/learn/tutorials/versioned/hello-samza-high-level-yarn.md b/docs/learn/tutorials/versioned/hello-samza-high-level-yarn.md
index 69abf68..46f762f 100644
--- a/docs/learn/tutorials/versioned/hello-samza-high-level-yarn.md
+++ b/docs/learn/tutorials/versioned/hello-samza-high-level-yarn.md
@@ -63,7 +63,7 @@
 {% highlight bash %}
 mvn clean package
 mkdir -p deploy/samza
-tar -xvf ./target/hello-samza-1.4.0-SNAPSHOT-dist.tar.gz -C deploy/samza
+tar -xvf ./target/hello-samza-1.6.0-SNAPSHOT-dist.tar.gz -C deploy/samza
 {% endhighlight %}
 
 ### Run a Samza Application
@@ -71,7 +71,7 @@
 After you've built your Samza package, you can start the app on the grid using the run-app.sh script.
 
 {% highlight bash %}
-./deploy/samza/bin/run-app.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-application.properties
+./deploy/samza/bin/run-app.sh --config-path=$PWD/deploy/samza/config/wikipedia-application.properties
 {% endhighlight %}
 
 The app will do all of the following:
@@ -115,7 +115,7 @@
 
 To shutdown the app, use the same _run-app.sh_ script with an extra _--operation=kill_ argument
 {% highlight bash %}
-./deploy/samza/bin/run-app.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-application.properties --operation=kill
+./deploy/samza/bin/run-app.sh --config-path=$PWD/deploy/samza/config/wikipedia-application.properties --operation=kill
 {% endhighlight %}
 
 After you're done, you can clean everything up using the same grid script.
diff --git a/docs/learn/tutorials/versioned/hello-samza-high-level-zk.md b/docs/learn/tutorials/versioned/hello-samza-high-level-zk.md
index aa139e0..ae1d9c1 100644
--- a/docs/learn/tutorials/versioned/hello-samza-high-level-zk.md
+++ b/docs/learn/tutorials/versioned/hello-samza-high-level-zk.md
@@ -59,7 +59,7 @@
 {% highlight bash %}
 mvn clean package
 mkdir -p deploy/samza
-tar -xvf ./target/hello-samza-1.4.0-SNAPSHOT-dist.tar.gz -C deploy/samza
+tar -xvf ./target/hello-samza-1.6.0-SNAPSHOT-dist.tar.gz -C deploy/samza
 {% endhighlight %}
 
 We are now all set to deploy the application locally.
diff --git a/docs/learn/tutorials/versioned/remote-debugging-samza.md b/docs/learn/tutorials/versioned/remote-debugging-samza.md
index e0c7445..88c85fb 100644
--- a/docs/learn/tutorials/versioned/remote-debugging-samza.md
+++ b/docs/learn/tutorials/versioned/remote-debugging-samza.md
@@ -84,7 +84,7 @@
 mvn clean package
 mkdir -p deploy/samza
 tar -xvf ./target/hello-samza-1.1.0-dist.tar.gz -C deploy/samza
-deploy/samza/bin/run-job.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-feed.properties
+deploy/samza/bin/run-app.sh --config-path=$PWD/deploy/samza/config/wikipedia-feed.properties
 {% endhighlight %}
 
 When the wikipedia-feed job starts up, a single Samza container will be created to process all incoming messages. This is the container that we'll want to connect to from the remote debugger.
diff --git a/docs/learn/tutorials/versioned/run-hello-samza-without-internet.md b/docs/learn/tutorials/versioned/run-hello-samza-without-internet.md
index e276cdb..487bd42 100644
--- a/docs/learn/tutorials/versioned/run-hello-samza-without-internet.md
+++ b/docs/learn/tutorials/versioned/run-hello-samza-without-internet.md
@@ -48,7 +48,7 @@
 We provide an alternative to get wikipedia feed data. Instead of running
 
 {% highlight bash %}
-deploy/samza/bin/run-job.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-feed.properties
+deploy/samza/bin/run-app.sh --config-path=$PWD/deploy/samza/config/wikipedia-feed.properties
 {% endhighlight %}
 
 You will run
@@ -70,7 +70,7 @@
 The goal of
 
 {% highlight bash %}
-deploy/samza/bin/run-job.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-feed.properties
+deploy/samza/bin/run-app.sh --config-path=$PWD/deploy/samza/config/wikipedia-feed.properties
 {% endhighlight %}
 
 is to deploy a Samza job which listens to wikipedia API, receives the feed in realtime and produces the feed to the Kafka topic wikipedia-raw. The alternative in this tutorial is reading local wikipedia feed in an infinite loop and producing the data to Kafka wikipedia-raw. The follow-up job, wikipedia-parser is getting data from Kafka topic wikipedia-raw, so as long as we have correct data in Kafka topic wikipedia-raw, we are fine. All Samza jobs are connected by the Kafka and do not depend on each other.
diff --git a/docs/learn/tutorials/versioned/run-in-multi-node-yarn.md b/docs/learn/tutorials/versioned/run-in-multi-node-yarn.md
index 837f6ed..e4745ce 100644
--- a/docs/learn/tutorials/versioned/run-in-multi-node-yarn.md
+++ b/docs/learn/tutorials/versioned/run-in-multi-node-yarn.md
@@ -167,7 +167,7 @@
 Go back to the original terminal (not the one running the HTTP server):
 
 {% highlight bash %}
-deploy/samza/bin/run-job.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-feed.properties
+deploy/samza/bin/run-app.sh --config-path=$PWD/deploy/samza/config/wikipedia-feed.properties
 {% endhighlight %}
 
 Go to http://yourHostname:8088 and find the wikipedia-feed job. Click on the ApplicationMaster link to see that it's running.
diff --git a/docs/learn/tutorials/versioned/samza-rest-getting-started.md b/docs/learn/tutorials/versioned/samza-rest-getting-started.md
index bd8137a..bbeba71 100644
--- a/docs/learn/tutorials/versioned/samza-rest-getting-started.md
+++ b/docs/learn/tutorials/versioned/samza-rest-getting-started.md
@@ -48,7 +48,7 @@
 {% highlight bash %}
 cd samza-rest/build/distributions/
 mkdir -p deploy/samza-rest
-tar -xvf ./samza-rest-1.4.0-SNAPSHOT.tgz -C deploy/samza-rest
+tar -xvf ./samza-rest_2.11-1.6.0-SNAPSHOT.tgz -C deploy/samza-rest
 {% endhighlight %}
 
 #### Configure the Installations Path
@@ -71,8 +71,8 @@
 {% highlight bash %}
 cd deploy/samza-rest
 ./bin/run-samza-rest-service.sh  \
-  --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory \
-  --config-path=file://$PWD/config/samza-rest.properties
+  --config job.config.loader.factory=org.apache.samza.config.loaders.PropertiesConfigLoaderFactory \
+  --config job.config.loader.properties.path=$PWD/config/samza-rest.properties
 {% endhighlight %}
 
 You provide two parameters to the run-samza-rest-service.sh script. One is the config location, and the other, optional, parameter is a factory class that is used to read your configuration file. The SamzaRestService uses your ConfigFactory to get a Config object from the config path. The ConfigFactory is covered in more detail on the [Job Runner page](../../documentation/{{site.version}}/jobs/job-runner.html). The run-samza-rest-service.sh script will block until the SamzaRestService terminates.
diff --git a/docs/learn/tutorials/versioned/samza-sql.md b/docs/learn/tutorials/versioned/samza-sql.md
index f905d4a..98a86b4 100644
--- a/docs/learn/tutorials/versioned/samza-sql.md
+++ b/docs/learn/tutorials/versioned/samza-sql.md
@@ -88,7 +88,7 @@
 After you've built your Samza package, you can start the app on the grid using the run-app.sh script.
 
 ```bash
-./deploy/samza/bin/run-app.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/page-view-filter-sql.properties
+./deploy/samza/bin/run-app.sh --config-path=$PWD/deploy/samza/config/page-view-filter-sql.properties
 ```
 
 The app executes the following SQL command :
@@ -115,7 +115,7 @@
 
 To shutdown the app, use the same _run-app.sh_ script with an extra _--operation=kill_ argument
 ```bash
-./deploy/samza/bin/run-app.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/page-view-filter-sql.properties --operation=kill
+./deploy/samza/bin/run-app.sh --config-path=$PWD/deploy/samza/config/page-view-filter-sql.properties --operation=kill
 ```
 
 Please follow the instructions from [Hello Samza High Level API - YARN Deployment](hello-samza-high-level-yarn.html) on how to shutdown and cleanup the app.
diff --git a/docs/startup/download/index.md b/docs/startup/download/index.md
index 4a3322b..41aa440 100644
--- a/docs/startup/download/index.md
+++ b/docs/startup/download/index.md
@@ -31,6 +31,7 @@
 
  Samza tools package contains command line tools that user can run to use Samza and it's input/output systems.
 
+ * [samza-tools_2.11-1.5.0.tgz](http://www-us.apache.org/dist/samza/1.5.0/samza-tools_2.11-1.5.0.tgz)
  * [samza-tools_2.11-1.4.0.tgz](http://www-us.apache.org/dist/samza/1.4.0/samza-tools_2.11-1.4.0.tgz)
  * [samza-tools_2.11-1.3.1.tgz](http://www-us.apache.org/dist/samza/1.3.1/samza-tools_2.11-1.3.1.tgz)
  * [samza-tools_2.11-1.3.0.tgz](http://www-us.apache.org/dist/samza/1.3.0/samza-tools_2.11-1.3.0.tgz)
@@ -41,6 +42,7 @@
 
 ### Source Releases
 
+ * [samza-sources-1.5.0.tgz](http://www.apache.org/dyn/closer.lua/samza/1.5.0)
  * [samza-sources-1.4.0.tgz](http://www.apache.org/dyn/closer.lua/samza/1.4.0)
  * [samza-sources-1.3.1.tgz](http://www.apache.org/dyn/closer.lua/samza/1.3.1)
  * [samza-sources-1.3.0.tgz](http://www.apache.org/dyn/closer.lua/samza/1.3.0)
@@ -73,12 +75,12 @@
 <dependency>
   <groupId>org.apache.samza</groupId>
   <artifactId>samza-api</artifactId>
-  <version>1.4.0</version>
+  <version>1.5.0</version>
 </dependency>
 <dependency>
   <groupId>org.apache.samza</groupId>
   <artifactId>samza-core_2.11</artifactId>
-  <version>1.4.0</version>
+  <version>1.5.0</version>
   <scope>runtime</scope>
 </dependency>
 <dependency>
@@ -86,37 +88,37 @@
   <artifactId>samza-shell</artifactId>
   <classifier>dist</classifier>
   <type>tgz</type>
-  <version>1.4.0</version>
+  <version>1.5.0</version>
   <scope>runtime</scope>
 </dependency>
 <dependency>
   <groupId>org.apache.samza</groupId>
   <artifactId>samza-yarn_2.11</artifactId>
-  <version>1.4.0</version>
+  <version>1.5.0</version>
   <scope>runtime</scope>
 </dependency>
 <dependency>
   <groupId>org.apache.samza</groupId>
   <artifactId>samza-kv_2.11</artifactId>
-  <version>1.4.0</version>
+  <version>1.5.0</version>
   <scope>runtime</scope>
 </dependency>
 <dependency>
   <groupId>org.apache.samza</groupId>
   <artifactId>samza-kv-rocksdb_2.11</artifactId>
-  <version>1.4.0</version>
+  <version>1.5.0</version>
   <scope>runtime</scope>
 </dependency>
 <dependency>
   <groupId>org.apache.samza</groupId>
   <artifactId>samza-kv-inmemory_2.11</artifactId>
-  <version>1.4.0</version>
+  <version>1.5.0</version>
   <scope>runtime</scope>
 </dependency>
 <dependency>
   <groupId>org.apache.samza</groupId>
   <artifactId>samza-kafka_2.11</artifactId>
-  <version>1.4.0</version>
+  <version>1.5.0</version>
   <scope>runtime</scope>
 </dependency>
 {% endhighlight %}
diff --git a/docs/startup/hello-samza/versioned/index.md b/docs/startup/hello-samza/versioned/index.md
index 3573e26..8f0add3 100644
--- a/docs/startup/hello-samza/versioned/index.md
+++ b/docs/startup/hello-samza/versioned/index.md
@@ -52,7 +52,7 @@
 **(Optional)** NOTE: if you want the hello-samza jobs to run with a local Samza build (e.g., if you are a Samza developer), 
 make sure that you run the following steps, otherwise skip them.
 {% highlight bash %}
-In your hello-world project,  
+In your hello-samza project,  
 git checkout latest  
 In your local Samza project,  
 ./gradlew publishToMavenLocal  
@@ -63,7 +63,7 @@
 {% highlight bash %}
 mvn clean package
 mkdir -p deploy/samza
-tar -xvf ./target/hello-samza-1.4.0-SNAPSHOT-dist.tar.gz -C deploy/samza
+tar -xvf ./target/hello-samza-1.6.0-SNAPSHOT-dist.tar.gz -C deploy/samza
 {% endhighlight %}
 
 ### Run a Samza Job
@@ -71,7 +71,7 @@
 After you've built your Samza package, you can start a job on the grid using the run-app.sh script.
 
 {% highlight bash %}
-deploy/samza/bin/run-app.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-feed.properties
+deploy/samza/bin/run-app.sh --config-path=$PWD/deploy/samza/config/wikipedia-feed.properties
 {% endhighlight %}
 
 The job will consume a feed of real-time edits from Wikipedia, and produce them to a Kafka topic called "wikipedia-raw". Give the job a minute to start up, and then tail the Kafka topic:
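The tail command itself falls outside this hunk; a typical way to peek at the topic, assuming Kafka was installed by the grid script under deploy/kafka and listens on localhost:9092, is:

{% highlight bash %}
# Paths and ports are assumptions; adjust to match your grid setup
deploy/kafka/bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic wikipedia-raw
{% endhighlight %}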
@@ -89,8 +89,8 @@
 Let's calculate some statistics based on the messages in the wikipedia-raw topic. Start two more jobs:
 
 {% highlight bash %}
-deploy/samza/bin/run-app.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-parser.properties
-deploy/samza/bin/run-app.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-stats.properties
+deploy/samza/bin/run-app.sh --config-path=$PWD/deploy/samza/config/wikipedia-parser.properties
+deploy/samza/bin/run-app.sh --config-path=$PWD/deploy/samza/config/wikipedia-stats.properties
 {% endhighlight %}
 
 The first job (wikipedia-parser) parses the messages in wikipedia-raw, and extracts information about the size of the edit, who made the change, etc. You can take a look at its output with:
@@ -120,7 +120,7 @@
 
 To shut down one of the jobs, use the same script with an extra '--operation=kill' argument
 {% highlight bash %}
-deploy/samza/bin/run-app.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-feed.properties --operation=kill
+deploy/samza/bin/run-app.sh --config-path=$PWD/deploy/samza/config/wikipedia-feed.properties --operation=kill
 {% endhighlight %}
 
 After you're done, you can clean everything up using the same grid script.
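For reference, cleaning everything up usually amounts to stopping the whole grid; a minimal sketch, assuming the hello-samza bin/grid helper script:

{% highlight bash %}
# Stops YARN, Kafka and ZooKeeper started earlier by the grid script
bin/grid stop all
{% endhighlight %}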
diff --git a/docs/startup/quick-start/versioned/samza-sql.md b/docs/startup/quick-start/versioned/samza-sql.md
index b3bfdbd..4517698 100644
--- a/docs/startup/quick-start/versioned/samza-sql.md
+++ b/docs/startup/quick-start/versioned/samza-sql.md
@@ -76,7 +76,7 @@
 
 
 ```bash
-./deploy/samza/bin/run-app.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/page-view-filter-sql.properties
+./deploy/samza/bin/run-app.sh --config-path=$PWD/deploy/samza/config/page-view-filter-sql.properties
 ```
 
  
diff --git a/docs/startup/quick-start/versioned/samza.md b/docs/startup/quick-start/versioned/samza.md
index 4c3c32a..394f238 100644
--- a/docs/startup/quick-start/versioned/samza.md
+++ b/docs/startup/quick-start/versioned/samza.md
@@ -211,7 +211,7 @@
 
 {% highlight bash %}
 > export BASE_DIR=`pwd`
-> ./gradlew run --args="--config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$BASE_DIR/src/main/config/word-count.properties"
+> ./gradlew run --args="--config job.config.loader.factory=org.apache.samza.config.loaders.PropertiesConfigLoaderFactory --config job.config.loader.properties.path=$BASE_DIR/src/main/config/word-count.properties"
 {% endhighlight %}
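Since the loader settings above are ordinary Samza configs, further --config key=value pairs can be appended to the same --args string. The job.name override below is purely illustrative and assumes that command-line configs take precedence over values loaded from the properties file.

{% highlight bash %}
> export BASE_DIR=`pwd`
> ./gradlew run --args="--config job.config.loader.factory=org.apache.samza.config.loaders.PropertiesConfigLoaderFactory --config job.config.loader.properties.path=$BASE_DIR/src/main/config/word-count.properties --config job.name=word-count"
{% endhighlight %}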
 
 
diff --git a/gradle.properties b/gradle.properties
index dda382c..6fb78db 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -15,7 +15,7 @@
 # specific language governing permissions and limitations
 # under the License.
 group=org.apache.samza
-version=1.5.0-SNAPSHOT
+version=1.6.0-SNAPSHOT
 scalaSuffix=2.11
 
 # after changing this value, run `$ ./gradlew wrapper` and commit the resulting changed files
@@ -24,4 +24,4 @@
 org.gradle.jvmargs="-XX:MaxPermSize=512m"
 
 systemProp.file.encoding=utf-8
-checkstyleVersion=6.11
+checkstyleVersion=6.11.2
diff --git a/gradle/dependency-versions.gradle b/gradle/dependency-versions.gradle
index dfe6978..061f0b7 100644
--- a/gradle/dependency-versions.gradle
+++ b/gradle/dependency-versions.gradle
@@ -19,7 +19,7 @@
  ext {
   apacheCommonsCollections4Version = "4.0"
   avroVersion = "1.7.7"
-  calciteVersion = "1.19.0"
+  calciteVersion = "1.22.0"
   commonsCliVersion = "1.2"
   commonsCodecVersion = "1.9"
   commonsCollectionVersion = "3.2.1"
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index 44e7c4d..83639a3 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -1,3 +1,4 @@
+#Mon Jun 01 15:50:38 PDT 2020
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
 distributionUrl=https\://services.gradle.org/distributions/gradle-5.2.1-bin.zip
diff --git a/samza-api/src/main/java/org/apache/samza/metrics/SamzaHistogram.java b/samza-api/src/main/java/org/apache/samza/metrics/SamzaHistogram.java
index 85835f9..6ac7615 100644
--- a/samza-api/src/main/java/org/apache/samza/metrics/SamzaHistogram.java
+++ b/samza-api/src/main/java/org/apache/samza/metrics/SamzaHistogram.java
@@ -48,7 +48,7 @@
     this.gauges = this.percentiles.stream()
         .filter(x -> x > 0 && x <= 100)
         .collect(Collectors.toMap(Function.identity(),
-            x -> registry.newGauge(group, new HistogramGauge(x, name + "_" + String.valueOf(x), 0D))));
+          x -> registry.newGauge(group, new HistogramGauge(x, name + "_" + String.valueOf(x), 0D))));
   }
 
   public void update(long value) {
diff --git a/samza-api/src/test/java/org/apache/samza/serializers/TestJsonSerdeV2.java b/samza-api/src/test/java/org/apache/samza/serializers/TestJsonSerdeV2.java
index 2c087d2..f6ec8aa 100644
--- a/samza-api/src/test/java/org/apache/samza/serializers/TestJsonSerdeV2.java
+++ b/samza-api/src/test/java/org/apache/samza/serializers/TestJsonSerdeV2.java
@@ -37,11 +37,11 @@
     assertEquals(obj, serde.fromBytes(bytes));
     JsonSerdeV2<Map.Entry<String, Object>> serdeHashMapEntry = new JsonSerdeV2<>();
     obj.entrySet().forEach(entry -> {
-        try {
-          serdeHashMapEntry.toBytes(entry);
-        } catch (Exception e) {
-          fail("HashMap Entry serialization failed!");
-        }
-      });
+      try {
+        serdeHashMapEntry.toBytes(entry);
+      } catch (Exception e) {
+        fail("HashMap Entry serialization failed!");
+      }
+    });
   }
 }
diff --git a/samza-api/src/test/java/org/apache/samza/system/descriptors/examples/expanding/ExampleExpandingSystemDescriptor.java b/samza-api/src/test/java/org/apache/samza/system/descriptors/examples/expanding/ExampleExpandingSystemDescriptor.java
index 6ab1c8b..3f13e44 100644
--- a/samza-api/src/test/java/org/apache/samza/system/descriptors/examples/expanding/ExampleExpandingSystemDescriptor.java
+++ b/samza-api/src/test/java/org/apache/samza/system/descriptors/examples/expanding/ExampleExpandingSystemDescriptor.java
@@ -31,10 +31,8 @@
   private static final String FACTORY_CLASS_NAME = "org.apache.samza.GraphExpandingSystemFactory";
 
   public ExampleExpandingSystemDescriptor(String systemName) {
-    super(systemName, FACTORY_CLASS_NAME,
-        (InputTransformer<String>) IncomingMessageEnvelope::toString,
-        (streamGraph, inputDescriptor) -> (MessageStream<Long>) streamGraph.getInputStream(inputDescriptor)
-    );
+    super(systemName, FACTORY_CLASS_NAME, (InputTransformer<String>) IncomingMessageEnvelope::toString,
+      (streamGraph, inputDescriptor) -> (MessageStream<Long>) streamGraph.getInputStream(inputDescriptor));
   }
 
   @Override
diff --git a/samza-api/src/test/java/org/apache/samza/table/remote/TestTableRateLimiter.java b/samza-api/src/test/java/org/apache/samza/table/remote/TestTableRateLimiter.java
index f6f11ff..7de447c 100644
--- a/samza-api/src/test/java/org/apache/samza/table/remote/TestTableRateLimiter.java
+++ b/samza-api/src/test/java/org/apache/samza/table/remote/TestTableRateLimiter.java
@@ -45,10 +45,10 @@
   public TableRateLimiter<String, String> getThrottler(String tag) {
     TableRateLimiter.CreditFunction<String, String> credFn =
         (TableRateLimiter.CreditFunction<String, String>) (key, value, args) -> {
-      int credits = key == null ? 0 : 3;
-      credits += value == null ? 0 : 3;
-      return credits;
-    };
+          int credits = key == null ? 0 : 3;
+          credits += value == null ? 0 : 3;
+          return credits;
+        };
     RateLimiter rateLimiter = mock(RateLimiter.class);
     doReturn(Collections.singleton(tag)).when(rateLimiter).getSupportedTags();
     TableRateLimiter<String, String> rateLimitHelper = new TableRateLimiter<>("foo", rateLimiter, credFn, tag);
diff --git a/samza-aws/src/main/java/org/apache/samza/system/kinesis/KinesisConfig.java b/samza-aws/src/main/java/org/apache/samza/system/kinesis/KinesisConfig.java
index e0c9099..8e3fc7b 100644
--- a/samza-aws/src/main/java/org/apache/samza/system/kinesis/KinesisConfig.java
+++ b/samza-aws/src/main/java/org/apache/samza/system/kinesis/KinesisConfig.java
@@ -84,12 +84,12 @@
     // all properties should now start with stream name
     Set<String> streams = new HashSet<>();
     streamsConfig.keySet().forEach(key -> {
-        String[] parts = key.split("\\.", 2);
-        if (parts.length != 2) {
-          throw new IllegalArgumentException("Ill-formatted stream config: " + key);
-        }
-        streams.add(parts[0]);
-      });
+      String[] parts = key.split("\\.", 2);
+      if (parts.length != 2) {
+        throw new IllegalArgumentException("Ill-formatted stream config: " + key);
+      }
+      streams.add(parts[0]);
+    });
     return streams;
   }
 
diff --git a/samza-aws/src/main/java/org/apache/samza/system/kinesis/KinesisSystemFactory.java b/samza-aws/src/main/java/org/apache/samza/system/kinesis/KinesisSystemFactory.java
index 2758022..60d3b37 100644
--- a/samza-aws/src/main/java/org/apache/samza/system/kinesis/KinesisSystemFactory.java
+++ b/samza-aws/src/main/java/org/apache/samza/system/kinesis/KinesisSystemFactory.java
@@ -76,11 +76,11 @@
     // Kinesis streams cannot be configured as bootstrap streams
     KinesisConfig kConfig = new KinesisConfig(config);
     kConfig.getKinesisStreams(system).forEach(stream -> {
-        StreamConfig streamConfig = new StreamConfig(kConfig);
-        SystemStream ss = new SystemStream(system, stream);
-        if (streamConfig.getBootstrapEnabled(ss)) {
-          throw new ConfigException("Kinesis streams cannot be configured as bootstrap streams.");
-        }
-      });
+      StreamConfig streamConfig = new StreamConfig(kConfig);
+      SystemStream ss = new SystemStream(system, stream);
+      if (streamConfig.getBootstrapEnabled(ss)) {
+        throw new ConfigException("Kinesis streams cannot be configured as bootstrap streams.");
+      }
+    });
   }
 }
diff --git a/samza-aws/src/main/java/org/apache/samza/system/kinesis/consumer/KinesisSystemConsumer.java b/samza-aws/src/main/java/org/apache/samza/system/kinesis/consumer/KinesisSystemConsumer.java
index 80d43de..16cd4d5 100644
--- a/samza-aws/src/main/java/org/apache/samza/system/kinesis/consumer/KinesisSystemConsumer.java
+++ b/samza-aws/src/main/java/org/apache/samza/system/kinesis/consumer/KinesisSystemConsumer.java
@@ -215,19 +215,19 @@
   public void afterCheckpoint(Map<SystemStreamPartition, String> sspOffsets) {
     LOG.info("afterCheckpoint called with sspOffsets {}", sspOffsets);
     sspOffsets.forEach((ssp, offset) -> {
-        KinesisRecordProcessor processor = processors.get(ssp);
-        KinesisSystemConsumerOffset kinesisOffset = KinesisSystemConsumerOffset.parse(offset);
-        if (processor == null) {
-          LOG.info("Kinesis Processor is not alive for ssp {}. This could be the result of rebalance. Hence dropping the"
-              + " checkpoint {}.", ssp, offset);
-        } else if (!kinesisOffset.getShardId().equals(processor.getShardId())) {
-          LOG.info("KinesisProcessor for ssp {} currently owns shard {} while the checkpoint is for shard {}. This could"
-              + " be the result of rebalance. Hence dropping the checkpoint {}.", ssp, processor.getShardId(),
-              kinesisOffset.getShardId(), offset);
-        } else {
-          processor.checkpoint(kinesisOffset.getSeqNumber());
-        }
-      });
+      KinesisRecordProcessor processor = processors.get(ssp);
+      KinesisSystemConsumerOffset kinesisOffset = KinesisSystemConsumerOffset.parse(offset);
+      if (processor == null) {
+        LOG.info("Kinesis Processor is not alive for ssp {}. This could be the result of rebalance. Hence dropping the"
+            + " checkpoint {}.", ssp, offset);
+      } else if (!kinesisOffset.getShardId().equals(processor.getShardId())) {
+        LOG.info("KinesisProcessor for ssp {} currently owns shard {} while the checkpoint is for shard {}. This could"
+            + " be the result of rebalance. Hence dropping the checkpoint {}.", ssp, processor.getShardId(),
+            kinesisOffset.getShardId(), offset);
+      } else {
+        processor.checkpoint(kinesisOffset.getSeqNumber());
+      }
+    });
   }
 
   @Override
diff --git a/samza-aws/src/main/java/org/apache/samza/system/kinesis/descriptors/KinesisInputDescriptor.java b/samza-aws/src/main/java/org/apache/samza/system/kinesis/descriptors/KinesisInputDescriptor.java
index fcce49e..130a189 100644
--- a/samza-aws/src/main/java/org/apache/samza/system/kinesis/descriptors/KinesisInputDescriptor.java
+++ b/samza-aws/src/main/java/org/apache/samza/system/kinesis/descriptors/KinesisInputDescriptor.java
@@ -110,12 +110,11 @@
     String clientConfigPrefix =
         String.format(KinesisConfig.CONFIG_STREAM_KINESIS_CLIENT_LIB_CONFIG, systemName, streamId);
 
-    region.ifPresent(
-        val -> config.put(String.format(KinesisConfig.CONFIG_STREAM_REGION, systemName, streamId), val));
+    region.ifPresent(val -> config.put(String.format(KinesisConfig.CONFIG_STREAM_REGION, systemName, streamId), val));
     accessKey.ifPresent(
-        val -> config.put(String.format(KinesisConfig.CONFIG_STREAM_ACCESS_KEY, systemName, streamId), val));
+      val -> config.put(String.format(KinesisConfig.CONFIG_STREAM_ACCESS_KEY, systemName, streamId), val));
     secretKey.ifPresent(
-        val -> config.put(String.format(KinesisConfig.CONFIG_STREAM_SECRET_KEY, systemName, streamId), val));
+      val -> config.put(String.format(KinesisConfig.CONFIG_STREAM_SECRET_KEY, systemName, streamId), val));
     kclConfig.forEach((k, v) -> config.put(clientConfigPrefix + k, v));
 
     return config;
diff --git a/samza-aws/src/main/java/org/apache/samza/system/kinesis/descriptors/KinesisSystemDescriptor.java b/samza-aws/src/main/java/org/apache/samza/system/kinesis/descriptors/KinesisSystemDescriptor.java
index 678dfe6..f5f27c9 100644
--- a/samza-aws/src/main/java/org/apache/samza/system/kinesis/descriptors/KinesisSystemDescriptor.java
+++ b/samza-aws/src/main/java/org/apache/samza/system/kinesis/descriptors/KinesisSystemDescriptor.java
@@ -121,12 +121,10 @@
     Map<String, String> config = new HashMap<>(super.toConfig());
     String systemName = getSystemName();
 
-    region.ifPresent(
-        val -> config.put(String.format(KinesisConfig.CONFIG_SYSTEM_REGION, systemName), val));
-    proxyHost.ifPresent(
-        val -> config.put(String.format(KinesisConfig.CONFIG_PROXY_HOST, systemName), val));
+    region.ifPresent(val -> config.put(String.format(KinesisConfig.CONFIG_SYSTEM_REGION, systemName), val));
+    proxyHost.ifPresent(val -> config.put(String.format(KinesisConfig.CONFIG_PROXY_HOST, systemName), val));
     proxyPort.ifPresent(
-        val -> config.put(String.format(KinesisConfig.CONFIG_PROXY_PORT, systemName), String.valueOf(val)));
+      val -> config.put(String.format(KinesisConfig.CONFIG_PROXY_PORT, systemName), String.valueOf(val)));
 
     String kclConfigPrefix = String.format(KinesisConfig.CONFIG_SYSTEM_KINESIS_CLIENT_LIB_CONFIG, systemName);
     kclConfig.forEach((k, v) -> config.put(kclConfigPrefix + k, v));
diff --git a/samza-aws/src/main/java/org/apache/samza/system/kinesis/metrics/KinesisSystemConsumerMetrics.java b/samza-aws/src/main/java/org/apache/samza/system/kinesis/metrics/KinesisSystemConsumerMetrics.java
index 2f42981..e3a9c55 100644
--- a/samza-aws/src/main/java/org/apache/samza/system/kinesis/metrics/KinesisSystemConsumerMetrics.java
+++ b/samza-aws/src/main/java/org/apache/samza/system/kinesis/metrics/KinesisSystemConsumerMetrics.java
@@ -73,7 +73,7 @@
         .collect(Collectors.toConcurrentMap(Function.identity(), x -> new SamzaHistogram(registry, x, READ_LATENCY)));
     millisBehindLatest = streamNames.stream()
         .collect(Collectors.toConcurrentMap(Function.identity(),
-            x -> new SamzaHistogram(registry, x, MILLIS_BEHIND_LATEST)));
+          x -> new SamzaHistogram(registry, x, MILLIS_BEHIND_LATEST)));
 
     // Locking to ensure that these aggregated metrics will be created only once across multiple system consumers.
     synchronized (LOCK) {
diff --git a/samza-aws/src/test/java/org/apache/samza/system/kinesis/consumer/TestKinesisRecordProcessor.java b/samza-aws/src/test/java/org/apache/samza/system/kinesis/consumer/TestKinesisRecordProcessor.java
index 6f1f052..2551d07 100644
--- a/samza-aws/src/test/java/org/apache/samza/system/kinesis/consumer/TestKinesisRecordProcessor.java
+++ b/samza-aws/src/test/java/org/apache/samza/system/kinesis/consumer/TestKinesisRecordProcessor.java
@@ -233,22 +233,22 @@
       List<KinesisRecordProcessor> processors) {
     Map<KinesisRecordProcessor, List<Record>> processorRecordMap = new HashMap<>();
     processors.forEach(processor -> {
-        try {
-          // Create records and call process records
-          IRecordProcessorCheckpointer checkpointer = Mockito.mock(IRecordProcessorCheckpointer.class);
-          doNothing().when(checkpointer).checkpoint(anyString());
-          doNothing().when(checkpointer).checkpoint();
-          ProcessRecordsInput processRecordsInput = Mockito.mock(ProcessRecordsInput.class);
-          when(processRecordsInput.getCheckpointer()).thenReturn(checkpointer);
-          when(processRecordsInput.getMillisBehindLatest()).thenReturn(1000L);
-          List<Record> inputRecords = createRecords(numRecordsPerShard);
-          processorRecordMap.put(processor, inputRecords);
-          when(processRecordsInput.getRecords()).thenReturn(inputRecords);
-          processor.processRecords(processRecordsInput);
-        } catch (ShutdownException | InvalidStateException ex) {
-          throw new RuntimeException(ex);
-        }
-      });
+      try {
+        // Create records and call process records
+        IRecordProcessorCheckpointer checkpointer = Mockito.mock(IRecordProcessorCheckpointer.class);
+        doNothing().when(checkpointer).checkpoint(anyString());
+        doNothing().when(checkpointer).checkpoint();
+        ProcessRecordsInput processRecordsInput = Mockito.mock(ProcessRecordsInput.class);
+        when(processRecordsInput.getCheckpointer()).thenReturn(checkpointer);
+        when(processRecordsInput.getMillisBehindLatest()).thenReturn(1000L);
+        List<Record> inputRecords = createRecords(numRecordsPerShard);
+        processorRecordMap.put(processor, inputRecords);
+        when(processRecordsInput.getRecords()).thenReturn(inputRecords);
+        processor.processRecords(processRecordsInput);
+      } catch (ShutdownException | InvalidStateException ex) {
+        throw new RuntimeException(ex);
+      }
+    });
     return processorRecordMap;
   }
 
diff --git a/samza-aws/src/test/java/org/apache/samza/system/kinesis/consumer/TestKinesisSystemConsumer.java b/samza-aws/src/test/java/org/apache/samza/system/kinesis/consumer/TestKinesisSystemConsumer.java
index fe7fa96..235c829 100644
--- a/samza-aws/src/test/java/org/apache/samza/system/kinesis/consumer/TestKinesisSystemConsumer.java
+++ b/samza-aws/src/test/java/org/apache/samza/system/kinesis/consumer/TestKinesisSystemConsumer.java
@@ -103,9 +103,9 @@
     List<SystemStreamPartition> ssps = new LinkedList<>();
     IntStream.range(0, numShards)
         .forEach(p -> {
-            SystemStreamPartition ssp = new SystemStreamPartition(system, stream, new Partition(p));
-            ssps.add(ssp);
-          });
+          SystemStreamPartition ssp = new SystemStreamPartition(system, stream, new Partition(p));
+          ssps.add(ssp);
+        });
     ssps.forEach(ssp -> consumer.register(ssp, SYSTEM_CONSUMER_REGISTER_OFFSET));
 
     // Create Kinesis record processor factory
@@ -133,47 +133,47 @@
 
     Map<SystemStreamPartition, KinesisRecordProcessor> sspToProcessorMap = getProcessorMap(consumer);
     ssps.forEach(ssp -> {
-        try {
-          KinesisRecordProcessor processor = sspToProcessorMap.get(ssp);
+      try {
+        KinesisRecordProcessor processor = sspToProcessorMap.get(ssp);
 
-          // Verify that the read messages are received in order and are the same as input records
-          Assert.assertEquals(messages.get(ssp).size(), numRecordsPerShard);
-          List<IncomingMessageEnvelope> envelopes = messages.get(ssp);
-          List<Record> inputRecords = inputRecordMap.get(processor);
-          verifyRecords(envelopes, inputRecords, processor.getShardId());
+        // Verify that the read messages are received in order and are the same as input records
+        Assert.assertEquals(messages.get(ssp).size(), numRecordsPerShard);
+        List<IncomingMessageEnvelope> envelopes = messages.get(ssp);
+        List<Record> inputRecords = inputRecordMap.get(processor);
+        verifyRecords(envelopes, inputRecords, processor.getShardId());
 
-          // Call checkpoint on consumer and verify that the checkpoint is called with the right offset
-          IncomingMessageEnvelope lastEnvelope = envelopes.get(envelopes.size() - 1);
-          consumer.afterCheckpoint(Collections.singletonMap(ssp, lastEnvelope.getOffset()));
-          ArgumentCaptor<String> argument = ArgumentCaptor.forClass(String.class);
-          verify(getCheckpointer(processor)).checkpoint(argument.capture());
-          Assert.assertEquals(inputRecords.get(inputRecords.size() - 1).getSequenceNumber(), argument.getValue());
+        // Call checkpoint on consumer and verify that the checkpoint is called with the right offset
+        IncomingMessageEnvelope lastEnvelope = envelopes.get(envelopes.size() - 1);
+        consumer.afterCheckpoint(Collections.singletonMap(ssp, lastEnvelope.getOffset()));
+        ArgumentCaptor<String> argument = ArgumentCaptor.forClass(String.class);
+        verify(getCheckpointer(processor)).checkpoint(argument.capture());
+        Assert.assertEquals(inputRecords.get(inputRecords.size() - 1).getSequenceNumber(), argument.getValue());
 
-          // Call shutdown (with ZOMBIE reason) on processor and verify if shutdown freed the ssp mapping
-          shutDownProcessor(processor, ShutdownReason.ZOMBIE);
-          Assert.assertFalse(sspToProcessorMap.containsValue(processor));
-          Assert.assertTrue(isSspAvailable(consumer, ssp));
-        } catch (NoSuchFieldException | IllegalAccessException | InvalidStateException | ShutdownException ex) {
-          throw new RuntimeException(ex);
-        }
-      });
+        // Call shutdown (with ZOMBIE reason) on processor and verify if shutdown freed the ssp mapping
+        shutDownProcessor(processor, ShutdownReason.ZOMBIE);
+        Assert.assertFalse(sspToProcessorMap.containsValue(processor));
+        Assert.assertTrue(isSspAvailable(consumer, ssp));
+      } catch (NoSuchFieldException | IllegalAccessException | InvalidStateException | ShutdownException ex) {
+        throw new RuntimeException(ex);
+      }
+    });
   }
 
   private Map<String, KinesisRecordProcessor> createAndInitProcessors(IRecordProcessorFactory factory, int numShards) {
     Map<String, KinesisRecordProcessor> processorMap = new HashMap<>();
     IntStream.range(0, numShards)
         .forEach(p -> {
-            String shardId = String.format("shard-%05d", p);
-            // Create Kinesis processor
-            KinesisRecordProcessor processor = (KinesisRecordProcessor) factory.createProcessor();
+          String shardId = String.format("shard-%05d", p);
+          // Create Kinesis processor
+          KinesisRecordProcessor processor = (KinesisRecordProcessor) factory.createProcessor();
 
-            // Initialize the shard
-            ExtendedSequenceNumber seqNum = new ExtendedSequenceNumber("0000");
-            InitializationInput initializationInput =
-                new InitializationInput().withShardId(shardId).withExtendedSequenceNumber(seqNum);
-            processor.initialize(initializationInput);
-            processorMap.put(shardId, processor);
-          });
+          // Initialize the shard
+          ExtendedSequenceNumber seqNum = new ExtendedSequenceNumber("0000");
+          InitializationInput initializationInput =
+              new InitializationInput().withShardId(shardId).withExtendedSequenceNumber(seqNum);
+          processor.initialize(initializationInput);
+          processorMap.put(shardId, processor);
+        });
     return processorMap;
   }
 
@@ -186,12 +186,12 @@
       Map<SystemStreamPartition, List<IncomingMessageEnvelope>> receivedMessages =
           consumer.poll(ssps, Duration.ofSeconds(1).toMillis());
       receivedMessages.forEach((key, value) -> {
-          if (messages.containsKey(key)) {
-            messages.get(key).addAll(value);
-          } else {
-            messages.put(key, new ArrayList<>(value));
-          }
-        });
+        if (messages.containsKey(key)) {
+          messages.get(key).addAll(value);
+        } else {
+          messages.put(key, new ArrayList<>(value));
+        }
+      });
       totalEventsConsumed = messages.values().stream().mapToInt(List::size).sum();
     }
 
@@ -205,19 +205,19 @@
   private void verifyRecords(List<IncomingMessageEnvelope> outputRecords, List<Record> inputRecords, String shardId) {
     Iterator outputRecordsIter = outputRecords.iterator();
     inputRecords.forEach(record -> {
-        IncomingMessageEnvelope envelope = (IncomingMessageEnvelope) outputRecordsIter.next();
-        String outputKey = (String) envelope.getKey();
-        KinesisIncomingMessageEnvelope kinesisMessageEnvelope = (KinesisIncomingMessageEnvelope) envelope;
-        Assert.assertEquals(outputKey, record.getPartitionKey());
-        Assert.assertEquals(kinesisMessageEnvelope.getSequenceNumber(), record.getSequenceNumber());
-        Assert.assertEquals(kinesisMessageEnvelope.getApproximateArrivalTimestamp(),
-            record.getApproximateArrivalTimestamp());
-        Assert.assertEquals(kinesisMessageEnvelope.getShardId(), shardId);
-        ByteBuffer outputData = ByteBuffer.wrap((byte[]) kinesisMessageEnvelope.getMessage());
-        record.getData().rewind();
-        Assert.assertEquals(outputData, record.getData());
-        verifyOffset(envelope.getOffset(), record, shardId);
-      });
+      IncomingMessageEnvelope envelope = (IncomingMessageEnvelope) outputRecordsIter.next();
+      String outputKey = (String) envelope.getKey();
+      KinesisIncomingMessageEnvelope kinesisMessageEnvelope = (KinesisIncomingMessageEnvelope) envelope;
+      Assert.assertEquals(outputKey, record.getPartitionKey());
+      Assert.assertEquals(kinesisMessageEnvelope.getSequenceNumber(), record.getSequenceNumber());
+      Assert.assertEquals(kinesisMessageEnvelope.getApproximateArrivalTimestamp(),
+          record.getApproximateArrivalTimestamp());
+      Assert.assertEquals(kinesisMessageEnvelope.getShardId(), shardId);
+      ByteBuffer outputData = ByteBuffer.wrap((byte[]) kinesisMessageEnvelope.getMessage());
+      record.getData().rewind();
+      Assert.assertEquals(outputData, record.getData());
+      verifyOffset(envelope.getOffset(), record, shardId);
+    });
   }
 
   private void verifyOffset(String offset, Record inputRecord, String shardId) {
diff --git a/samza-azure/src/main/java/org/apache/samza/coordinator/AzureJobCoordinator.java b/samza-azure/src/main/java/org/apache/samza/coordinator/AzureJobCoordinator.java
index 7fc1423..08e8124 100644
--- a/samza-azure/src/main/java/org/apache/samza/coordinator/AzureJobCoordinator.java
+++ b/samza-azure/src/main/java/org/apache/samza/coordinator/AzureJobCoordinator.java
@@ -485,12 +485,12 @@
       // Schedule a task to renew the lease after a fixed time interval
       LOG.info("Starting scheduler to keep renewing lease held by the leader.");
       renewLease = new RenewLeaseScheduler((errorMsg) -> {
-          LOG.error(errorMsg);
-          table.updateIsLeader(currentJMVersion.get(), processorId, false);
-          azureLeaderElector.resignLeadership();
-          renewLease.shutdown();
-          liveness.shutdown();
-        }, azureLeaderElector.getLeaseBlobManager(), azureLeaderElector.getLeaseId());
+        LOG.error(errorMsg);
+        table.updateIsLeader(currentJMVersion.get(), processorId, false);
+        azureLeaderElector.resignLeadership();
+        renewLease.shutdown();
+        liveness.shutdown();
+      }, azureLeaderElector.getLeaseBlobManager(), azureLeaderElector.getLeaseId());
       renewLease.scheduleTask();
 
       doOnProcessorChange(new ArrayList<>());
diff --git a/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/HeartbeatScheduler.java b/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/HeartbeatScheduler.java
index 2abb380..68d272a 100644
--- a/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/HeartbeatScheduler.java
+++ b/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/HeartbeatScheduler.java
@@ -60,14 +60,14 @@
   @Override
   public ScheduledFuture scheduleTask() {
     return scheduler.scheduleWithFixedDelay(() -> {
-        try {
-          String currJVM = currentJMVersion.get();
-          LOG.info("Updating heartbeat for processor ID: " + processorId + " and job model version: " + currJVM);
-          table.updateHeartbeat(currJVM, processorId);
-        } catch (Exception e) {
-          errorHandler.accept("Exception in Heartbeat Scheduler. Stopping the processor...");
-        }
-      }, HEARTBEAT_DELAY_SEC, HEARTBEAT_DELAY_SEC, TimeUnit.SECONDS);
+      try {
+        String currJVM = currentJMVersion.get();
+        LOG.info("Updating heartbeat for processor ID: " + processorId + " and job model version: " + currJVM);
+        table.updateHeartbeat(currJVM, processorId);
+      } catch (Exception e) {
+        errorHandler.accept("Exception in Heartbeat Scheduler. Stopping the processor...");
+      }
+    }, HEARTBEAT_DELAY_SEC, HEARTBEAT_DELAY_SEC, TimeUnit.SECONDS);
   }
 
   @Override
diff --git a/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/JMVersionUpgradeScheduler.java b/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/JMVersionUpgradeScheduler.java
index 235b1f8..03260a3 100644
--- a/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/JMVersionUpgradeScheduler.java
+++ b/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/JMVersionUpgradeScheduler.java
@@ -67,24 +67,24 @@
   @Override
   public ScheduledFuture scheduleTask() {
     return scheduler.scheduleWithFixedDelay(() -> {
-        try {
-          LOG.info("Checking for job model version upgrade");
-          // Read job model version from the blob.
-          String blobJMV = blob.getJobModelVersion();
-          LOG.info("Job Model Version seen on the blob: {}", blobJMV);
-          String blobBarrierState = blob.getBarrierState();
-          String currentJMV = currentJMVersion.get();
-          LOG.info("Current Job Model Version that the job coordinator is working on: {}", currentJMV);
-          String expectedBarrierState = BarrierState.START.toString() + " " + blobJMV;
-          List<String> processorList = blob.getLiveProcessorList();
-          // Check if the job model version on the blob is consistent with the job model version that the processor is operating on.
-          if (processorList != null && processorList.contains(processorId) && !currentJMV.equals(blobJMV) && blobBarrierState.equals(expectedBarrierState) && !versionUpgradeDetected.get()) {
-            listener.onStateChange();
-          }
-        } catch (Exception e) {
-          errorHandler.accept("Exception in Job Model Version Upgrade Scheduler. Stopping the processor...");
+      try {
+        LOG.info("Checking for job model version upgrade");
+        // Read job model version from the blob.
+        String blobJMV = blob.getJobModelVersion();
+        LOG.info("Job Model Version seen on the blob: {}", blobJMV);
+        String blobBarrierState = blob.getBarrierState();
+        String currentJMV = currentJMVersion.get();
+        LOG.info("Current Job Model Version that the job coordinator is working on: {}", currentJMV);
+        String expectedBarrierState = BarrierState.START.toString() + " " + blobJMV;
+        List<String> processorList = blob.getLiveProcessorList();
+        // Check if the job model version on the blob is consistent with the job model version that the processor is operating on.
+        if (processorList != null && processorList.contains(processorId) && !currentJMV.equals(blobJMV) && blobBarrierState.equals(expectedBarrierState) && !versionUpgradeDetected.get()) {
+          listener.onStateChange();
         }
-      }, JMV_UPGRADE_DELAY_SEC, JMV_UPGRADE_DELAY_SEC, TimeUnit.SECONDS);
+      } catch (Exception e) {
+        errorHandler.accept("Exception in Job Model Version Upgrade Scheduler. Stopping the processor...");
+      }
+    }, JMV_UPGRADE_DELAY_SEC, JMV_UPGRADE_DELAY_SEC, TimeUnit.SECONDS);
   }
 
   @Override
diff --git a/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/LeaderBarrierCompleteScheduler.java b/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/LeaderBarrierCompleteScheduler.java
index 7386fa9..f7d4d93 100644
--- a/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/LeaderBarrierCompleteScheduler.java
+++ b/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/LeaderBarrierCompleteScheduler.java
@@ -77,32 +77,32 @@
   @Override
   public ScheduledFuture scheduleTask() {
     return scheduler.scheduleWithFixedDelay(() -> {
-        try {
-          if (!table.getEntity(currentJMVersion.get(), processorId).getIsLeader()) {
-            LOG.info("Not the leader anymore. Shutting down LeaderBarrierCompleteScheduler.");
+      try {
+        if (!table.getEntity(currentJMVersion.get(), processorId).getIsLeader()) {
+          LOG.info("Not the leader anymore. Shutting down LeaderBarrierCompleteScheduler.");
+          barrierTimeout.getAndSet(true);
+          listener.onStateChange();
+        } else {
+          LOG.info("Leader checking for barrier state");
+          // Get processor IDs listed in the table that have the new job model version.
+          Iterable<ProcessorEntity> tableList = table.getEntitiesWithPartition(nextJMVersion);
+          Set<String> tableProcessors = new HashSet<>();
+          for (ProcessorEntity entity : tableList) {
+            tableProcessors.add(entity.getRowKey());
+          }
+          LOG.info("List of live processors as seen on the blob = {}", blobProcessorSet);
+          LOG.info("List of live processors as seen in the table = {}", tableProcessors);
+          if ((System.currentTimeMillis() - startTime) > (BARRIER_TIMEOUT_SEC * 1000)) {
             barrierTimeout.getAndSet(true);
             listener.onStateChange();
-          } else {
-            LOG.info("Leader checking for barrier state");
-            // Get processor IDs listed in the table that have the new job model verion.
-            Iterable<ProcessorEntity> tableList = table.getEntitiesWithPartition(nextJMVersion);
-            Set<String> tableProcessors = new HashSet<>();
-            for (ProcessorEntity entity : tableList) {
-              tableProcessors.add(entity.getRowKey());
-            }
-            LOG.info("List of live processors as seen on the blob = {}", blobProcessorSet);
-            LOG.info("List of live processors as seen in the table = {}", tableProcessors);
-            if ((System.currentTimeMillis() - startTime) > (BARRIER_TIMEOUT_SEC * 1000)) {
-              barrierTimeout.getAndSet(true);
-              listener.onStateChange();
-            } else if (blobProcessorSet.equals(tableProcessors)) {
-              listener.onStateChange();
-            }
+          } else if (blobProcessorSet.equals(tableProcessors)) {
+            listener.onStateChange();
           }
-        } catch (Exception e) {
-          errorHandler.accept("Exception in LeaderBarrierCompleteScheduler. Stopping the processor...");
         }
-      }, BARRIER_REACHED_DELAY_SEC, BARRIER_REACHED_DELAY_SEC, TimeUnit.SECONDS);
+      } catch (Exception e) {
+        errorHandler.accept("Exception in LeaderBarrierCompleteScheduler. Stopping the processor...");
+      }
+    }, BARRIER_REACHED_DELAY_SEC, BARRIER_REACHED_DELAY_SEC, TimeUnit.SECONDS);
   }
 
   @Override
diff --git a/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/LeaderLivenessCheckScheduler.java b/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/LeaderLivenessCheckScheduler.java
index 59a8123..c3502a3 100644
--- a/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/LeaderLivenessCheckScheduler.java
+++ b/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/LeaderLivenessCheckScheduler.java
@@ -64,15 +64,15 @@
   @Override
   public ScheduledFuture scheduleTask() {
     return scheduler.scheduleWithFixedDelay(() -> {
-        try {
-          LOG.info("Checking for leader liveness");
-          if (!checkIfLeaderAlive()) {
-            listener.onStateChange();
-          }
-        } catch (Exception e) {
-          errorHandler.accept("Exception in Leader Liveness Check Scheduler. Stopping the processor...");
+      try {
+        LOG.info("Checking for leader liveness");
+        if (!checkIfLeaderAlive()) {
+          listener.onStateChange();
         }
-      }, LIVENESS_CHECK_DELAY_SEC, LIVENESS_CHECK_DELAY_SEC, TimeUnit.SECONDS);
+      } catch (Exception e) {
+        errorHandler.accept("Exception in Leader Liveness Check Scheduler. Stopping the processor...");
+      }
+    }, LIVENESS_CHECK_DELAY_SEC, LIVENESS_CHECK_DELAY_SEC, TimeUnit.SECONDS);
   }
 
   @Override
diff --git a/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/LivenessCheckScheduler.java b/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/LivenessCheckScheduler.java
index d4715f3..2ac52f3 100644
--- a/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/LivenessCheckScheduler.java
+++ b/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/LivenessCheckScheduler.java
@@ -69,26 +69,26 @@
   @Override
   public ScheduledFuture scheduleTask() {
     return scheduler.scheduleWithFixedDelay(() -> {
-        try {
-          if (!table.getEntity(currentJMVersion.get(), processorId).getIsLeader()) {
-            LOG.info("Not the leader anymore. Shutting down LivenessCheckScheduler.");
-            scheduler.shutdownNow();
-            return;
-          }
-          LOG.info("Checking for list of live processors");
-          //Get the list of live processors published on the blob.
-          Set<String> currProcessors = new HashSet<>(blob.getLiveProcessorList());
-          //Get the list of live processors from the table. This is the current system state.
-          Set<String> liveProcessors = table.getActiveProcessorsList(currentJMVersion);
-          //Invoke listener if the table list is not consistent with the blob list.
-          if (!liveProcessors.equals(currProcessors)) {
-            liveProcessorsList.getAndSet(new ArrayList<>(liveProcessors));
-            listener.onStateChange();
-          }
-        } catch (Exception e) {
-          errorHandler.accept("Exception in Liveness Check Scheduler. Stopping the processor...");
+      try {
+        if (!table.getEntity(currentJMVersion.get(), processorId).getIsLeader()) {
+          LOG.info("Not the leader anymore. Shutting down LivenessCheckScheduler.");
+          scheduler.shutdownNow();
+          return;
         }
-      }, LIVENESS_CHECK_DELAY_SEC, LIVENESS_CHECK_DELAY_SEC, TimeUnit.SECONDS);
+        LOG.info("Checking for list of live processors");
+        //Get the list of live processors published on the blob.
+        Set<String> currProcessors = new HashSet<>(blob.getLiveProcessorList());
+        //Get the list of live processors from the table. This is the current system state.
+        Set<String> liveProcessors = table.getActiveProcessorsList(currentJMVersion);
+        //Invoke listener if the table list is not consistent with the blob list.
+        if (!liveProcessors.equals(currProcessors)) {
+          liveProcessorsList.getAndSet(new ArrayList<>(liveProcessors));
+          listener.onStateChange();
+        }
+      } catch (Exception e) {
+        errorHandler.accept("Exception in Liveness Check Scheduler. Stopping the processor...");
+      }
+    }, LIVENESS_CHECK_DELAY_SEC, LIVENESS_CHECK_DELAY_SEC, TimeUnit.SECONDS);
   }
 
   @Override
diff --git a/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/RenewLeaseScheduler.java b/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/RenewLeaseScheduler.java
index f158122..865e6e7 100644
--- a/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/RenewLeaseScheduler.java
+++ b/samza-azure/src/main/java/org/apache/samza/coordinator/scheduler/RenewLeaseScheduler.java
@@ -56,16 +56,16 @@
   @Override
   public ScheduledFuture scheduleTask() {
     return scheduler.scheduleWithFixedDelay(() -> {
-        try {
-          LOG.info("Renewing lease");
-          boolean status = leaseBlobManager.renewLease(leaseId.get());
-          if (!status) {
-            errorHandler.accept("Unable to renew lease. Continuing as non-leader.");
-          }
-        } catch (Exception e) {
-          errorHandler.accept("Exception in Renew Lease Scheduler. Continuing as non-leader.");
+      try {
+        LOG.info("Renewing lease");
+        boolean status = leaseBlobManager.renewLease(leaseId.get());
+        if (!status) {
+          errorHandler.accept("Unable to renew lease. Continuing as non-leader.");
         }
-      }, RENEW_LEASE_DELAY_SEC, RENEW_LEASE_DELAY_SEC, TimeUnit.SECONDS);
+      } catch (Exception e) {
+        errorHandler.accept("Exception in Renew Lease Scheduler. Continuing as non-leader.");
+      }
+    }, RENEW_LEASE_DELAY_SEC, RENEW_LEASE_DELAY_SEC, TimeUnit.SECONDS);
   }
 
   @Override
diff --git a/samza-azure/src/main/java/org/apache/samza/system/azureblob/AzureBlobConfig.java b/samza-azure/src/main/java/org/apache/samza/system/azureblob/AzureBlobConfig.java
index 92d76c4..58f206e 100644
--- a/samza-azure/src/main/java/org/apache/samza/system/azureblob/AzureBlobConfig.java
+++ b/samza-azure/src/main/java/org/apache/samza/system/azureblob/AzureBlobConfig.java
@@ -39,7 +39,8 @@
   public static final String SYSTEM_WRITER_FACTORY_CLASS_NAME_DEFAULT = "org.apache.samza.system.azureblob.avro.AzureBlobAvroWriterFactory";
 
   // Azure Storage Account name under which the Azure container representing this system is.
-  // System name = Azure container name (https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#container-names)
+  // System name = Azure container name
+  // (https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata#container-names)
   public static final String SYSTEM_AZURE_ACCOUNT_NAME = Config.SENSITIVE_PREFIX + SYSTEM_AZUREBLOB_PREFIX + "account.name";
 
   // Azure Storage Account key associated with the Azure Storage Account
@@ -94,6 +95,18 @@
   public static final String SYSTEM_SUFFIX_RANDOM_STRING_TO_BLOB_NAME = SYSTEM_AZUREBLOB_PREFIX + "suffixRandomStringToBlobName";
   private static final boolean SYSTEM_SUFFIX_RANDOM_STRING_TO_BLOB_NAME_DEFAULT = true;
 
+  // full class name of an implementation of org.apache.samza.system.azureblob.utils.BlobMetadataGeneratorFactory
+  // this factory should return an implementation of org.apache.samza.system.azureblob.utils.BlobMetadataGenerator
+  // this generator will be invoked when a blob is committed to add metadata properties to it
+  public static final String SYSTEM_BLOB_METADATA_PROPERTIES_GENERATOR_FACTORY = SYSTEM_AZUREBLOB_PREFIX + "metadataPropertiesGeneratorFactory";
+  private static final String SYSTEM_BLOB_METADATA_PROPERTIES_GENERATOR_FACTORY_DEFAULT =
+      "org.apache.samza.system.azureblob.utils.NullBlobMetadataGeneratorFactory";
+
+  // Additional configs for the metadata generator should be prefixed with this string which is passed to the generator.
+  // for example, to pass a "key":"value" pair to the metadata generator, add config like
+  // systems.<system-name>.azureblob.metadataGeneratorConfig.<key> with value <value>
+  public static final String SYSTEM_BLOB_METADATA_GENERATOR_CONFIG_PREFIX = SYSTEM_AZUREBLOB_PREFIX + "metadataGeneratorConfig.";
+
   public AzureBlobConfig(Config config) {
     super(config);
   }
@@ -185,4 +198,13 @@
   public long getMaxMessagesPerBlob(String systemName) {
     return getLong(String.format(SYSTEM_MAX_MESSAGES_PER_BLOB, systemName), SYSTEM_MAX_MESSAGES_PER_BLOB_DEFAULT);
   }
+
+  public String getSystemBlobMetadataPropertiesGeneratorFactory(String systemName) {
+    return get(String.format(SYSTEM_BLOB_METADATA_PROPERTIES_GENERATOR_FACTORY, systemName),
+        SYSTEM_BLOB_METADATA_PROPERTIES_GENERATOR_FACTORY_DEFAULT);
+  }
+
+  public Config getSystemBlobMetadataGeneratorConfigs(String systemName) {
+    return subset(String.format(SYSTEM_BLOB_METADATA_GENERATOR_CONFIG_PREFIX, systemName));
+  }
 }
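Wiring the new metadata-generator hooks up from job configuration would look roughly like the sketch below. The system name and factory class are hypothetical; only the key names (the metadataPropertiesGeneratorFactory key and the metadataGeneratorConfig. prefix under systems.<system-name>.azureblob.) come from the constants above.

```bash
# Hypothetical job config for an Azure Blob system named "azure-blob-system";
# com.example.MyBlobMetadataGeneratorFactory is a placeholder implementation.
cat >> my-job.properties <<'EOF'
systems.azure-blob-system.azureblob.metadataPropertiesGeneratorFactory=com.example.MyBlobMetadataGeneratorFactory
systems.azure-blob-system.azureblob.metadataGeneratorConfig.owner=data-infra-team
EOF
```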
diff --git a/samza-azure/src/main/java/org/apache/samza/system/azureblob/avro/AzureBlobAvroWriter.java b/samza-azure/src/main/java/org/apache/samza/system/azureblob/avro/AzureBlobAvroWriter.java
index 7f9a926..85c2b33 100644
--- a/samza-azure/src/main/java/org/apache/samza/system/azureblob/avro/AzureBlobAvroWriter.java
+++ b/samza-azure/src/main/java/org/apache/samza/system/azureblob/avro/AzureBlobAvroWriter.java
@@ -46,7 +46,9 @@
 import org.apache.avro.specific.SpecificDatumWriter;
 import org.apache.avro.specific.SpecificRecord;
 import org.apache.samza.SamzaException;
+import org.apache.samza.config.Config;
 import org.apache.samza.system.OutgoingMessageEnvelope;
+import org.apache.samza.system.azureblob.utils.BlobMetadataGeneratorFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -109,9 +111,13 @@
   private final boolean useRandomStringInBlobName;
   private final Object currentDataFileWriterLock = new Object();
   private volatile long recordsInCurrentBlob = 0;
+  private BlobMetadataGeneratorFactory blobMetadataGeneratorFactory;
+  private Config blobMetadataGeneratorConfig;
+  private String streamName;
 
   public AzureBlobAvroWriter(BlobContainerAsyncClient containerAsyncClient, String blobURLPrefix,
       Executor blobThreadPool, AzureBlobWriterMetrics metrics,
+      BlobMetadataGeneratorFactory blobMetadataGeneratorFactory, Config blobMetadataGeneratorConfig, String streamName,
       int maxBlockFlushThresholdSize, long flushTimeoutMs, Compression compression, boolean useRandomStringInBlobName,
       long maxBlobSize, long maxRecordsPerBlob) {
 
@@ -125,6 +131,9 @@
     this.useRandomStringInBlobName = useRandomStringInBlobName;
     this.maxBlobSize = maxBlobSize;
     this.maxRecordsPerBlob = maxRecordsPerBlob;
+    this.blobMetadataGeneratorFactory = blobMetadataGeneratorFactory;
+    this.blobMetadataGeneratorConfig = blobMetadataGeneratorConfig;
+    this.streamName = streamName;
   }
 
   /**
@@ -169,6 +178,9 @@
       }
       currentBlobWriterComponents.dataFileWriter.appendEncoded(ByteBuffer.wrap(encodedRecord));
       recordsInCurrentBlob++;
+      // incrementNumberOfRecordsInBlob should be invoked every time appendEncoded above is invoked.
+      // This counts the number of records in the blob so that the count can be added to the blob's metadata.
+      currentBlobWriterComponents.azureBlobOutputStream.incrementNumberOfRecordsInBlob();
     }
   }
   /**
@@ -201,13 +213,13 @@
         throw new IllegalStateException("Attempting to close an already closed AzureBlobAvroWriter");
       }
       allBlobWriterComponents.forEach(blobWriterComponents -> {
-          try {
-            closeDataFileWriter(blobWriterComponents.dataFileWriter, blobWriterComponents.azureBlobOutputStream,
-                blobWriterComponents.blockBlobAsyncClient);
-          } catch (IOException e) {
-            throw new SamzaException(e);
-          }
-        });
+        try {
+          closeDataFileWriter(blobWriterComponents.dataFileWriter, blobWriterComponents.azureBlobOutputStream,
+              blobWriterComponents.blockBlobAsyncClient);
+        } catch (IOException e) {
+          throw new SamzaException(e);
+        }
+      });
       isClosed = true;
     }
   }
@@ -217,6 +229,7 @@
       Executor blobThreadPool, int maxBlockFlushThresholdSize, int flushTimeoutMs, String blobURLPrefix,
       DataFileWriter<IndexedRecord> dataFileWriter,
       AzureBlobOutputStream azureBlobOutputStream, BlockBlobAsyncClient blockBlobAsyncClient,
+      BlobMetadataGeneratorFactory blobMetadataGeneratorFactory, Config blobMetadataGeneratorConfig, String streamName,
       long maxBlobSize, long maxRecordsPerBlob, Compression compression, boolean useRandomStringInBlobName) {
     if (dataFileWriter == null || azureBlobOutputStream == null || blockBlobAsyncClient == null) {
       this.currentBlobWriterComponents = null;
@@ -235,6 +248,9 @@
     this.useRandomStringInBlobName = useRandomStringInBlobName;
     this.maxBlobSize = maxBlobSize;
     this.maxRecordsPerBlob = maxRecordsPerBlob;
+    this.blobMetadataGeneratorFactory = blobMetadataGeneratorFactory;
+    this.blobMetadataGeneratorConfig = blobMetadataGeneratorConfig;
+    this.streamName = streamName;
   }
 
   @VisibleForTesting
@@ -271,10 +287,9 @@
       // dataFileWriter.close calls close of the azureBlobOutputStream associated with it.
       dataFileWriter.close();
     } catch (Exception e) {
-      // ensure that close is called even if dataFileWriter.close fails.
-      // This is to avoid loss of all the blocks uploaded for the blob
-      // as commitBlockList happens in close of azureBlobOutputStream.
-      azureBlobOutputStream.close();
+      LOG.error("Exception occurred during DataFileWriter.close for blob "
+          + blockBlobAsyncClient.getBlobUrl()
+          + ". All blocks uploaded so far for this blob will be discarded to avoid invalid blobs.");
       throw e;
     }
   }
@@ -318,8 +333,14 @@
     BlockBlobAsyncClient blockBlobAsyncClient = containerAsyncClient.getBlobAsyncClient(blobURL).getBlockBlobAsyncClient();
 
     DataFileWriter<IndexedRecord> dataFileWriter = new DataFileWriter<>(datumWriter);
-    AzureBlobOutputStream azureBlobOutputStream = new AzureBlobOutputStream(blockBlobAsyncClient, blobThreadPool, metrics,
-            flushTimeoutMs, maxBlockFlushThresholdSize, compression);
+    AzureBlobOutputStream azureBlobOutputStream;
+    try {
+      azureBlobOutputStream = new AzureBlobOutputStream(blockBlobAsyncClient, blobThreadPool, metrics,
+          blobMetadataGeneratorFactory, blobMetadataGeneratorConfig,
+          streamName, flushTimeoutMs, maxBlockFlushThresholdSize, compression);
+    } catch (Exception e) {
+      throw new SamzaException("Unable to create AzureBlobOutputStream", e);
+    }
     dataFileWriter.create(schema, azureBlobOutputStream);
     dataFileWriter.setFlushOnEveryBlock(false);
     this.currentBlobWriterComponents = new BlobWriterComponents(dataFileWriter, azureBlobOutputStream, blockBlobAsyncClient);
diff --git a/samza-azure/src/main/java/org/apache/samza/system/azureblob/avro/AzureBlobAvroWriterFactory.java b/samza-azure/src/main/java/org/apache/samza/system/azureblob/avro/AzureBlobAvroWriterFactory.java
index 2510766..0a4e019 100644
--- a/samza-azure/src/main/java/org/apache/samza/system/azureblob/avro/AzureBlobAvroWriterFactory.java
+++ b/samza-azure/src/main/java/org/apache/samza/system/azureblob/avro/AzureBlobAvroWriterFactory.java
@@ -26,6 +26,8 @@
 import com.azure.storage.blob.BlobContainerAsyncClient;
 import java.io.IOException;
 import java.util.concurrent.Executor;
+import org.apache.samza.config.Config;
+import org.apache.samza.system.azureblob.utils.BlobMetadataGeneratorFactory;
 
 
 public class AzureBlobAvroWriterFactory implements AzureBlobWriterFactory {
@@ -35,9 +37,11 @@
    */
   public AzureBlobWriter getWriterInstance(BlobContainerAsyncClient containerAsyncClient, String blobURL,
       Executor blobUploadThreadPool, AzureBlobWriterMetrics metrics,
+      BlobMetadataGeneratorFactory blobMetadataGeneratorFactory, Config blobMetadataGeneratorConfig, String streamName,
       int maxBlockFlushThresholdSize, long flushTimeoutMs, Compression compression, boolean useRandomStringInBlobName,
       long maxBlobSize, long maxMessagesPerBlob) throws IOException {
     return new AzureBlobAvroWriter(containerAsyncClient, blobURL, blobUploadThreadPool, metrics,
-        maxBlockFlushThresholdSize, flushTimeoutMs, compression, useRandomStringInBlobName, maxBlobSize, maxMessagesPerBlob);
+          blobMetadataGeneratorFactory, blobMetadataGeneratorConfig, streamName, maxBlockFlushThresholdSize, flushTimeoutMs,
+          compression, useRandomStringInBlobName, maxBlobSize, maxMessagesPerBlob);
   }
 }
diff --git a/samza-azure/src/main/java/org/apache/samza/system/azureblob/avro/AzureBlobOutputStream.java b/samza-azure/src/main/java/org/apache/samza/system/azureblob/avro/AzureBlobOutputStream.java
index 9db15a3..e615808 100644
--- a/samza-azure/src/main/java/org/apache/samza/system/azureblob/avro/AzureBlobOutputStream.java
+++ b/samza-azure/src/main/java/org/apache/samza/system/azureblob/avro/AzureBlobOutputStream.java
@@ -30,7 +30,6 @@
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Base64;
-import java.util.Collections;
 import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
@@ -38,6 +37,10 @@
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Executor;
 import java.util.concurrent.TimeUnit;
+import org.apache.samza.config.Config;
+import org.apache.samza.system.azureblob.utils.BlobMetadataContext;
+import org.apache.samza.system.azureblob.utils.BlobMetadataGenerator;
+import org.apache.samza.system.azureblob.utils.BlobMetadataGeneratorFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import reactor.core.publisher.Flux;
@@ -68,7 +71,6 @@
   private static final Logger LOG = LoggerFactory.getLogger(AzureBlobOutputStream.class);
   private static final int MAX_ATTEMPT = 3;
   private static final int MAX_BLOCKS_IN_AZURE_BLOB = 50000;
-  public static final String BLOB_RAW_SIZE_BYTES_METADATA = "rawSizeBytes";
   private final long flushTimeoutMs;
   private final BlockBlobAsyncClient blobAsyncClient;
   private final Executor blobThreadPool;
@@ -83,11 +85,29 @@
 
   private volatile boolean isClosed = false;
   private long totalUploadedBlockSize = 0;
+  private long totalNumberOfRecordsInBlob = 0;
   private int blockNum;
+  private final BlobMetadataGeneratorFactory blobMetadataGeneratorFactory;
+  private final Config blobMetadataGeneratorConfig;
+  private String streamName;
 
+  /**
+   *
+   * @param blobAsyncClient Client to communicate with Azure Blob Storage.
+   * @param blobThreadPool threads to be used for uploading blocks to Azure Blob Storage.
+   * @param metrics needed for emitting metrics about bytes written, blocks uploaded, blobs committed.
+   * @param blobMetadataGeneratorFactory impl of {@link org.apache.samza.system.azureblob.utils.BlobMetadataGeneratorFactory}
+   *                                   to be used for generating metadata properties for a blob
+   * @param blobMetadataGeneratorConfig config to be passed to the blob metadata generator
+   * @param streamName name of the stream that the generated blob corresponds to; used in metadata properties.
+   * @param flushTimeoutMs timeout for uploading a block
+   * @param maxBlockFlushThresholdSize max block size
+   * @param compression type of compression to be used before uploading a block
+   */
   public AzureBlobOutputStream(BlockBlobAsyncClient blobAsyncClient, Executor blobThreadPool, AzureBlobWriterMetrics metrics,
+      BlobMetadataGeneratorFactory blobMetadataGeneratorFactory, Config blobMetadataGeneratorConfig, String streamName,
       long flushTimeoutMs, int maxBlockFlushThresholdSize, Compression compression) {
-    this(blobAsyncClient, blobThreadPool, metrics, flushTimeoutMs, maxBlockFlushThresholdSize,
+    this(blobAsyncClient, blobThreadPool, metrics, blobMetadataGeneratorFactory, blobMetadataGeneratorConfig, streamName,
+        flushTimeoutMs, maxBlockFlushThresholdSize,
         new ByteArrayOutputStream(maxBlockFlushThresholdSize), compression);
   }
 
@@ -142,12 +162,15 @@
   /**
    * This api waits for all pending upload (stageBlock task) futures to finish.
    * It then synchronously commits the list of blocks to persist the actual blob on storage.
+   * Note: this method does not invoke flush; flush has to be called explicitly before close.
    * @throws IllegalStateException when
    *       - when closing an already closed stream
    * @throws RuntimeException when
    *       - byteArrayOutputStream.close fails or
    *       - any of the pending uploads fails or
    *       - blob's commitBlockList fails
+   * throws ClassNotFoundException, IllegalAccessException or InstantiationException
+   *       - while creating an instance of BlobMetadataGenerator
    */
   @Override
   public synchronized void close() {
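For illustration, a minimal usage sketch of the contract above (not part of this patch; collaborators, imports and placeholder values are assumed to be supplied by the caller): flush must be called explicitly before close, and close then commits the block list with the generated metadata.

static void writeAndCommit(BlockBlobAsyncClient client, Executor uploadPool, AzureBlobWriterMetrics metrics,
    BlobMetadataGeneratorFactory generatorFactory, Config generatorConfig, Compression compression,
    byte[] serializedRecord) throws IOException {
  AzureBlobOutputStream out = new AzureBlobOutputStream(client, uploadPool, metrics,
      generatorFactory, generatorConfig, "placeholder-stream",
      60000L, 10 * 1024 * 1024, compression);
  out.write(serializedRecord, 0, serializedRecord.length);
  out.incrementNumberOfRecordsInBlob(); // keep the record count in step with what was written
  out.flush(); // uploads the buffered bytes as a block; not invoked by close
  out.close(); // waits for pending uploads, then commits the block list with the generated metadata
}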
@@ -172,8 +195,8 @@
       future.get((long) flushTimeoutMs, TimeUnit.MILLISECONDS);
       LOG.info("For blob: {} committing blockList size:{}", blobAsyncClient.getBlobUrl().toString(), blockList.size());
       metrics.updateAzureCommitMetrics();
-      Map<String, String> blobMetadata = Collections.singletonMap(BLOB_RAW_SIZE_BYTES_METADATA, Long.toString(totalUploadedBlockSize));
-      commitBlob(blockList, blobMetadata);
+      BlobMetadataGenerator blobMetadataGenerator = getBlobMetadataGenerator();
+      commitBlob(blockList, blobMetadataGenerator.getBlobMetadata(new BlobMetadataContext(streamName, totalUploadedBlockSize, totalNumberOfRecordsInBlob)));
     } catch (Exception e) {
       String msg = String.format("Close blob %s failed with exception. Total pending sends %d",
           blobAsyncClient.getBlobUrl().toString(), pendingUpload.size());
@@ -209,8 +232,23 @@
     }
   }
 
+  /**
+   * This method is to be used for tracking the number of records written to the output stream.
+   * However, since records are written in chunks through the write(byte[],int,int) method,
+   * it is possible that not all records are completely written until flush is invoked.
+   *
+   * Additionally, the record count is intended to be used only as part of the blob's metadata,
+   * which is generated at blob commit time during close.
+   * Thus, totalNumberOfRecordsInBlob is not read until the close method.
+   * Since flush is called before close, totalNumberOfRecordsInBlob is accurate by then.
+   */
+  public synchronized void incrementNumberOfRecordsInBlob() {
+    totalNumberOfRecordsInBlob++;
+  }
+
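A short sketch (not from this patch) of how a writer is expected to pair each appended record with this call, so that the count is complete by the time flush and close run:

void writeRecord(DataFileWriter<IndexedRecord> dataFileWriter, AzureBlobOutputStream out,
    ByteBuffer encodedRecord) throws IOException {
  dataFileWriter.appendEncoded(encodedRecord); // bytes may stay buffered until flush
  out.incrementNumberOfRecordsInBlob();        // counted now, read back when close commits the blob
}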
   @VisibleForTesting
   AzureBlobOutputStream(BlockBlobAsyncClient blobAsyncClient, Executor blobThreadPool, AzureBlobWriterMetrics metrics,
+      BlobMetadataGeneratorFactory blobMetadataGeneratorFactory, Config blobMetadataGeneratorConfig, String streamName,
       long flushTimeoutMs, int maxBlockFlushThresholdSize,
       ByteArrayOutputStream byteArrayOutputStream, Compression compression) {
     this.byteArrayOutputStream = Optional.of(byteArrayOutputStream);
@@ -222,6 +260,9 @@
     this.maxBlockFlushThresholdSize = maxBlockFlushThresholdSize;
     this.metrics = metrics;
     this.compression = compression;
+    this.blobMetadataGeneratorFactory = blobMetadataGeneratorFactory;
+    this.blobMetadataGeneratorConfig = blobMetadataGeneratorConfig;
+    this.streamName = streamName;
   }
 
   // SAMZA-2476 stubbing BlockBlobAsyncClient.commitBlockListWithResponse was causing flaky tests.
@@ -245,6 +286,11 @@
     isClosed = true;
   }
 
+  @VisibleForTesting
+  BlobMetadataGenerator getBlobMetadataGenerator() throws Exception {
+    return blobMetadataGeneratorFactory.getBlobMetadataGeneratorInstance(blobMetadataGeneratorConfig);
+  }
+
   /**
    * This api will async upload the outputstream into block using stageBlocks,
   * reinit outputstream
@@ -305,15 +351,15 @@
     pendingUpload.add(future);
 
     future.handle((aVoid, throwable) -> {
-        if (throwable == null) {
-          LOG.info("Upload block for blob: {} with blockid: {} finished.", blobAsyncClient.getBlobUrl().toString(), blockId);
-          pendingUpload.remove(future);
-          return aVoid;
-        } else {
-          throw new AzureException("Blob upload failed for blob " + blobAsyncClient.getBlobUrl().toString()
-              + " and block with id: " + blockId, throwable);
-        }
-      });
+      if (throwable == null) {
+        LOG.info("Upload block for blob: {} with blockid: {} finished.", blobAsyncClient.getBlobUrl().toString(), blockId);
+        pendingUpload.remove(future);
+        return aVoid;
+      } else {
+        throw new AzureException("Blob upload failed for blob " + blobAsyncClient.getBlobUrl().toString()
+            + " and block with id: " + blockId, throwable);
+      }
+    });
 
     blockNum += 1;
     if (blockNum >= MAX_BLOCKS_IN_AZURE_BLOB) {
diff --git a/samza-azure/src/main/java/org/apache/samza/system/azureblob/producer/AzureBlobSystemProducer.java b/samza-azure/src/main/java/org/apache/samza/system/azureblob/producer/AzureBlobSystemProducer.java
index a2fa2ac..5ecd528 100644
--- a/samza-azure/src/main/java/org/apache/samza/system/azureblob/producer/AzureBlobSystemProducer.java
+++ b/samza-azure/src/main/java/org/apache/samza/system/azureblob/producer/AzureBlobSystemProducer.java
@@ -50,10 +50,12 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
+import org.apache.samza.config.Config;
 import org.apache.samza.metrics.MetricsRegistry;
 import org.apache.samza.system.OutgoingMessageEnvelope;
 import org.apache.samza.system.SystemProducer;
 import org.apache.samza.system.SystemProducerException;
+import org.apache.samza.system.azureblob.utils.BlobMetadataGeneratorFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -136,6 +138,9 @@
   private final Map<String, Object> sourceWriterCreationLockMap = new ConcurrentHashMap<>();
   private final Map<String, ReadWriteLock> sourceSendFlushLockMap = new ConcurrentHashMap<>();
 
+  private final BlobMetadataGeneratorFactory blobMetadataGeneratorFactory;
+  private final Config blobMetadataGeneratorConfig;
+
   public AzureBlobSystemProducer(String systemName, AzureBlobConfig config, MetricsRegistry metricsRegistry) {
     Preconditions.checkNotNull(systemName, "System name can not be null when creating AzureBlobSystemProducer");
     Preconditions.checkNotNull(config, "Config can not be null when creating AzureBlobSystemProducer");
@@ -171,6 +176,14 @@
     this.writerMap = new ConcurrentHashMap<>();
 
     this.metrics = new AzureBlobSystemProducerMetrics(systemName, config.getAzureAccountName(systemName), metricsRegistry);
+
+    String blobMetadataGeneratorFactoryClassName = this.config.getSystemBlobMetadataPropertiesGeneratorFactory(this.systemName);
+    try {
+      blobMetadataGeneratorFactory = (BlobMetadataGeneratorFactory) Class.forName(blobMetadataGeneratorFactoryClassName).newInstance();
+    } catch (Exception e) {
+      throw new SystemProducerException("Could not create blob metadata generator factory with name " + blobMetadataGeneratorFactoryClassName, e);
+    }
+    blobMetadataGeneratorConfig = this.config.getSystemBlobMetadataGeneratorConfigs(systemName);
   }
 
   /**
@@ -423,7 +436,7 @@
         if (writer == null) {
           AzureBlobWriterMetrics writerMetrics =
               new AzureBlobWriterMetrics(metrics.getAggregateMetrics(), metrics.getSystemMetrics(), metrics.getSourceMetrics(source));
-          writer = createNewWriter(blobURLPrefix, writerMetrics);
+          writer = createNewWriter(blobURLPrefix, writerMetrics, messageEnvelope.getSystemStream().getStream());
           sourceWriterMap.put(writerMapKey, writer);
         }
       }
@@ -458,41 +471,41 @@
 
   private void flushWriters(Map<String, AzureBlobWriter> sourceWriterMap) {
     sourceWriterMap.forEach((stream, writer) -> {
-        try {
-          LOG.info("Flushing topic:{}", stream);
-          writer.flush();
-        } catch (IOException e) {
-          throw new SystemProducerException("Close failed for topic " + stream, e);
-        }
-      });
+      try {
+        LOG.info("Flushing topic:{}", stream);
+        writer.flush();
+      } catch (IOException e) {
+        throw new SystemProducerException("Close failed for topic " + stream, e);
+      }
+    });
   }
 
   private void closeWriters(String source, Map<String, AzureBlobWriter> sourceWriterMap) throws Exception {
     Set<CompletableFuture<Void>> pendingClose = ConcurrentHashMap.newKeySet();
     try {
       sourceWriterMap.forEach((stream, writer) -> {
-          LOG.info("Closing topic:{}", stream);
-          CompletableFuture<Void> future = CompletableFuture.runAsync(new Runnable() {
-            @Override
-            public void run() {
-              try {
-                writer.close();
-              } catch (IOException e) {
-                throw new SystemProducerException("Close failed for topic " + stream, e);
-              }
+        LOG.info("Closing topic:{}", stream);
+        CompletableFuture<Void> future = CompletableFuture.runAsync(new Runnable() {
+          @Override
+          public void run() {
+            try {
+              writer.close();
+            } catch (IOException e) {
+              throw new SystemProducerException("Close failed for topic " + stream, e);
             }
-          }, asyncBlobThreadPool);
-          pendingClose.add(future);
-          future.handle((aVoid, throwable) -> {
-              sourceWriterMap.remove(writer);
-              if (throwable != null) {
-                throw new SystemProducerException("Close failed for topic " + stream, throwable);
-              } else {
-                LOG.info("Blob close finished for stream " + stream);
-                return aVoid;
-              }
-            });
+          }
+        }, asyncBlobThreadPool);
+        pendingClose.add(future);
+        future.handle((aVoid, throwable) -> {
+          sourceWriterMap.remove(writer);
+          if (throwable != null) {
+            throw new SystemProducerException("Close failed for topic " + stream, throwable);
+          } else {
+            LOG.info("Blob close finished for stream " + stream);
+            return aVoid;
+          }
         });
+      });
       CompletableFuture<Void> future = CompletableFuture.allOf(pendingClose.toArray(new CompletableFuture[0]));
       LOG.info("Flush source: {} has pending closes: {} ", source, pendingClose.size());
       future.get((long) closeTimeout, TimeUnit.MILLISECONDS);
@@ -502,9 +515,10 @@
   }
 
   @VisibleForTesting
-  AzureBlobWriter createNewWriter(String blobURL, AzureBlobWriterMetrics writerMetrics) {
+  AzureBlobWriter createNewWriter(String blobURL, AzureBlobWriterMetrics writerMetrics, String streamName) {
     try {
       return writerFactory.getWriterInstance(containerAsyncClient, blobURL, asyncBlobThreadPool, writerMetrics,
+          blobMetadataGeneratorFactory, blobMetadataGeneratorConfig, streamName,
           blockFlushThresholdSize, flushTimeoutMs,
           CompressionFactory.getInstance().getCompression(config.getCompressionType(systemName)),
           config.getSuffixRandomStringToBlobName(systemName),
diff --git a/samza-azure/src/main/java/org/apache/samza/system/azureblob/producer/AzureBlobWriterFactory.java b/samza-azure/src/main/java/org/apache/samza/system/azureblob/producer/AzureBlobWriterFactory.java
index 87ca5a6..e9fa16d 100644
--- a/samza-azure/src/main/java/org/apache/samza/system/azureblob/producer/AzureBlobWriterFactory.java
+++ b/samza-azure/src/main/java/org/apache/samza/system/azureblob/producer/AzureBlobWriterFactory.java
@@ -23,6 +23,8 @@
 import com.azure.storage.blob.BlobContainerAsyncClient;
 import java.io.IOException;
 import java.util.concurrent.Executor;
+import org.apache.samza.config.Config;
+import org.apache.samza.system.azureblob.utils.BlobMetadataGeneratorFactory;
 
 
 public interface AzureBlobWriterFactory {
@@ -32,6 +34,8 @@
    * @param blobURL Azure blob url
    * @param blobUploadThreadPool thread pool to be used by writer for uploading
    * @param metrics metrics to measure the number of bytes written by writer
+   * @param blobMetadataGeneratorFactory factory to create a generator of metadata properties for a blob
+   * @param blobMetadataGeneratorConfig config to be passed to the blob metadata generator
+   * @param streamName name of the stream that this AzureBlobWriter is associated with
    * @param maxBlockFlushThresholdSize threshold at which to upload
    * @param flushTimeoutMs timeout after which the flush is abandoned
    * @return AzureBlobWriter instance
@@ -39,6 +43,7 @@
    */
   AzureBlobWriter getWriterInstance(BlobContainerAsyncClient containerAsyncClient, String blobURL,
       Executor blobUploadThreadPool, AzureBlobWriterMetrics metrics,
+      BlobMetadataGeneratorFactory blobMetadataGeneratorFactory, Config blobMetadataGeneratorConfig, String streamName,
       int maxBlockFlushThresholdSize, long flushTimeoutMs, Compression compression, boolean useRandomStringInBlobName,
       long maxBlobSize, long maxMessagesPerBlob) throws IOException;
 }
diff --git a/samza-azure/src/main/java/org/apache/samza/system/azureblob/utils/BlobMetadataContext.java b/samza-azure/src/main/java/org/apache/samza/system/azureblob/utils/BlobMetadataContext.java
new file mode 100644
index 0000000..93ed0a5
--- /dev/null
+++ b/samza-azure/src/main/java/org/apache/samza/system/azureblob/utils/BlobMetadataContext.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.samza.system.azureblob.utils;
+
+/**
+ * Properties about a Blob which can then be used to generate metadata for the blob
+ */
+public class BlobMetadataContext {
+  private final String streamName;
+  private final long blobSize;
+  private final long numberOfMessagesInBlob;
+
+  public BlobMetadataContext(String streamName, long blobSize, long numberOfMessagesInBlob) {
+    this.streamName = streamName;
+    this.blobSize = blobSize;
+    this.numberOfMessagesInBlob = numberOfMessagesInBlob;
+  }
+
+  public String getStreamName() {
+    return streamName;
+  }
+
+  public long getBlobSize() {
+    return blobSize;
+  }
+
+  public long getNumberOfMessagesInBlob() {
+    return numberOfMessagesInBlob;
+  }
+}
diff --git a/samza-azure/src/main/java/org/apache/samza/system/azureblob/utils/BlobMetadataGenerator.java b/samza-azure/src/main/java/org/apache/samza/system/azureblob/utils/BlobMetadataGenerator.java
new file mode 100644
index 0000000..76f5f70
--- /dev/null
+++ b/samza-azure/src/main/java/org/apache/samza/system/azureblob/utils/BlobMetadataGenerator.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.samza.system.azureblob.utils;
+
+import java.util.Map;
+
+
+/**
+ * Interface for generating metadata properties of an Azure Blob.
+ * Implementations are not expected to be thread safe.
+ */
+public interface BlobMetadataGenerator {
+
+  /**
+   * Creates metadata properties for a blob.
+   * @param blobMetadataContext contains details about the blob that can be used for generating metadata
+   * @return map containing metadata properties to be associated with the blob
+   */
+  Map<String, String> getBlobMetadata(BlobMetadataContext blobMetadataContext);
+}
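A hypothetical implementation sketch (the class name is an assumption; the property names mirror those used in TestAzureBlobOutputStream further down) showing how a generator can turn the BlobMetadataContext captured at commit time into blob metadata properties:

import java.util.HashMap;
import java.util.Map;

public class SizeAndCountBlobMetadataGenerator implements BlobMetadataGenerator {
  @Override
  public Map<String, String> getBlobMetadata(BlobMetadataContext blobMetadataContext) {
    Map<String, String> properties = new HashMap<>();
    properties.put("streamName", blobMetadataContext.getStreamName());
    properties.put("rawSizeBytes", Long.toString(blobMetadataContext.getBlobSize()));
    properties.put("numberOfRecords", Long.toString(blobMetadataContext.getNumberOfMessagesInBlob()));
    return properties;
  }
}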
diff --git a/samza-azure/src/main/java/org/apache/samza/system/azureblob/utils/BlobMetadataGeneratorFactory.java b/samza-azure/src/main/java/org/apache/samza/system/azureblob/utils/BlobMetadataGeneratorFactory.java
new file mode 100644
index 0000000..61661a4
--- /dev/null
+++ b/samza-azure/src/main/java/org/apache/samza/system/azureblob/utils/BlobMetadataGeneratorFactory.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.samza.system.azureblob.utils;
+
+import org.apache.samza.config.Config;
+
+
+public interface BlobMetadataGeneratorFactory {
+
+  /**
+   * Creates an instance of {@link BlobMetadataGenerator}
+   */
+  BlobMetadataGenerator getBlobMetadataGeneratorInstance(Config blobMetadataGeneratorConfig);
+}
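And the matching factory sketch (also hypothetical): this is the kind of class name that would be supplied via config and instantiated reflectively by AzureBlobSystemProducer.

public class SizeAndCountBlobMetadataGeneratorFactory implements BlobMetadataGeneratorFactory {
  @Override
  public BlobMetadataGenerator getBlobMetadataGeneratorInstance(Config blobMetadataGeneratorConfig) {
    return new SizeAndCountBlobMetadataGenerator();
  }
}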
diff --git a/samza-azure/src/main/java/org/apache/samza/system/azureblob/utils/NullBlobMetadataGenerator.java b/samza-azure/src/main/java/org/apache/samza/system/azureblob/utils/NullBlobMetadataGenerator.java
new file mode 100644
index 0000000..6b6efd2
--- /dev/null
+++ b/samza-azure/src/main/java/org/apache/samza/system/azureblob/utils/NullBlobMetadataGenerator.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.samza.system.azureblob.utils;
+
+import java.util.Map;
+
+
+public class NullBlobMetadataGenerator implements BlobMetadataGenerator {
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public Map<String, String> getBlobMetadata(BlobMetadataContext blobMetadataContext) {
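+    // returning null means no metadata properties are generated for the blob at commit time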
+    return null;
+  }
+}
diff --git a/samza-azure/src/main/java/org/apache/samza/system/azureblob/utils/NullBlobMetadataGeneratorFactory.java b/samza-azure/src/main/java/org/apache/samza/system/azureblob/utils/NullBlobMetadataGeneratorFactory.java
new file mode 100644
index 0000000..107e1c6
--- /dev/null
+++ b/samza-azure/src/main/java/org/apache/samza/system/azureblob/utils/NullBlobMetadataGeneratorFactory.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.samza.system.azureblob.utils;
+
+import org.apache.samza.config.Config;
+
+
+public class NullBlobMetadataGeneratorFactory implements BlobMetadataGeneratorFactory {
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public BlobMetadataGenerator getBlobMetadataGeneratorInstance(Config blobMetadataGeneratorConfig) {
+    return new NullBlobMetadataGenerator();
+  }
+}
diff --git a/samza-azure/src/main/java/org/apache/samza/system/eventhub/EventHubConfig.java b/samza-azure/src/main/java/org/apache/samza/system/eventhub/EventHubConfig.java
index 69a921e..e7e82ae 100644
--- a/samza-azure/src/main/java/org/apache/samza/system/eventhub/EventHubConfig.java
+++ b/samza-azure/src/main/java/org/apache/samza/system/eventhub/EventHubConfig.java
@@ -110,10 +110,10 @@
     LOG.info("Building mappings from physicalName to streamId");
     streamConfig.getStreamIds()
         .forEach((streamId) -> {
-            String physicalName = streamConfig.getPhysicalName(streamId);
-            LOG.info("Obtained physicalName: {} for streamId: {} ", physicalName, streamId);
-            physcialToId.put(physicalName, streamId);
-          });
+          String physicalName = streamConfig.getPhysicalName(streamId);
+          LOG.info("Obtained physicalName: {} for streamId: {} ", physicalName, streamId);
+          physcialToId.put(physicalName, streamId);
+        });
   }
 
   private String getFromStreamIdOrName(String configName, String streamName, String defaultString) {
diff --git a/samza-azure/src/main/java/org/apache/samza/system/eventhub/admin/EventHubSystemAdmin.java b/samza-azure/src/main/java/org/apache/samza/system/eventhub/admin/EventHubSystemAdmin.java
index 2d22929..2103b1c 100644
--- a/samza-azure/src/main/java/org/apache/samza/system/eventhub/admin/EventHubSystemAdmin.java
+++ b/samza-azure/src/main/java/org/apache/samza/system/eventhub/admin/EventHubSystemAdmin.java
@@ -187,15 +187,15 @@
               .getPartitionRuntimeInformation(partition);
       futureList.add(partitionRuntimeInfo);
       partitionRuntimeInfo.thenAccept(ehPartitionInfo -> {
-          LOG.info(printPartitionRuntimeInfo(ehPartitionInfo));
-          // Set offsets
-          String startingOffset = EventHubSystemConsumer.START_OF_STREAM;
-          String newestOffset = ehPartitionInfo.getLastEnqueuedOffset();
-          String upcomingOffset = EventHubSystemConsumer.END_OF_STREAM;
-          SystemStreamPartitionMetadata sspMetadata = new SystemStreamPartitionMetadata(startingOffset, newestOffset,
-            upcomingOffset);
-          sspMetadataMap.put(new Partition(Integer.parseInt(partition)), sspMetadata);
-        });
+        LOG.info(printPartitionRuntimeInfo(ehPartitionInfo));
+        // Set offsets
+        String startingOffset = EventHubSystemConsumer.START_OF_STREAM;
+        String newestOffset = ehPartitionInfo.getLastEnqueuedOffset();
+        String upcomingOffset = EventHubSystemConsumer.END_OF_STREAM;
+        SystemStreamPartitionMetadata sspMetadata = new SystemStreamPartitionMetadata(startingOffset, newestOffset,
+          upcomingOffset);
+        sspMetadataMap.put(new Partition(Integer.parseInt(partition)), sspMetadata);
+      });
     }
 
     CompletableFuture<Void> futureGroup =
diff --git a/samza-azure/src/main/java/org/apache/samza/system/eventhub/consumer/EventHubSystemConsumer.java b/samza-azure/src/main/java/org/apache/samza/system/eventhub/consumer/EventHubSystemConsumer.java
index 2f2873e..a6d975f 100644
--- a/samza-azure/src/main/java/org/apache/samza/system/eventhub/consumer/EventHubSystemConsumer.java
+++ b/samza-azure/src/main/java/org/apache/samza/system/eventhub/consumer/EventHubSystemConsumer.java
@@ -467,29 +467,29 @@
     public void onReceive(Iterable<EventData> events) {
       if (events != null) {
         events.forEach(event -> {
-            byte[] eventDataBody = event.getBytes();
-            if (interceptor != null) {
-              eventDataBody = interceptor.intercept(eventDataBody);
-            }
-            String offset = event.getSystemProperties().getOffset();
-            Object partitionKey = event.getSystemProperties().getPartitionKey();
-            if (partitionKey == null) {
-              partitionKey = event.getProperties().get(EventHubSystemProducer.KEY);
-            }
-            try {
-              updateMetrics(event);
+          byte[] eventDataBody = event.getBytes();
+          if (interceptor != null) {
+            eventDataBody = interceptor.intercept(eventDataBody);
+          }
+          String offset = event.getSystemProperties().getOffset();
+          Object partitionKey = event.getSystemProperties().getPartitionKey();
+          if (partitionKey == null) {
+            partitionKey = event.getProperties().get(EventHubSystemProducer.KEY);
+          }
+          try {
+            updateMetrics(event);
 
-              // note that the partition key can be null
-              put(ssp, new EventHubIncomingMessageEnvelope(ssp, offset, partitionKey, eventDataBody, event));
-            } catch (InterruptedException e) {
-              String msg = String.format("Interrupted while adding the event from ssp %s to dispatch queue.", ssp);
-              LOG.error(msg, e);
-              throw new SamzaException(msg, e);
-            }
+            // note that the partition key can be null
+            put(ssp, new EventHubIncomingMessageEnvelope(ssp, offset, partitionKey, eventDataBody, event));
+          } catch (InterruptedException e) {
+            String msg = String.format("Interrupted while adding the event from ssp %s to dispatch queue.", ssp);
+            LOG.error(msg, e);
+            throw new SamzaException(msg, e);
+          }
 
-            // Cache latest checkpoint
-            streamPartitionOffsets.put(ssp, offset);
-          });
+          // Cache latest checkpoint
+          streamPartitionOffsets.put(ssp, offset);
+        });
       }
     }
 
diff --git a/samza-azure/src/main/java/org/apache/samza/system/eventhub/producer/AsyncSystemProducer.java b/samza-azure/src/main/java/org/apache/samza/system/eventhub/producer/AsyncSystemProducer.java
index 83d51ed..7b6a82e 100644
--- a/samza-azure/src/main/java/org/apache/samza/system/eventhub/producer/AsyncSystemProducer.java
+++ b/samza-azure/src/main/java/org/apache/samza/system/eventhub/producer/AsyncSystemProducer.java
@@ -140,25 +140,25 @@
 
     // Auto update the metrics and possible throwable when futures are complete.
     sendResult.handle((aVoid, throwable) -> {
-        long callbackLatencyMs = System.currentTimeMillis() - afterSendTimeMs;
-        sendCallbackLatency.get(streamId).update(callbackLatencyMs);
-        aggSendCallbackLatency.update(callbackLatencyMs);
-        if (throwable != null) {
-          sendErrors.get(streamId).inc();
-          aggSendErrors.inc();
-          LOG.error("Send message to event hub: {} failed with exception: ", streamId, throwable);
-          sendExceptionOnCallback.compareAndSet(null, throwable);
-        }
-        return aVoid;
-      });
+      long callbackLatencyMs = System.currentTimeMillis() - afterSendTimeMs;
+      sendCallbackLatency.get(streamId).update(callbackLatencyMs);
+      aggSendCallbackLatency.update(callbackLatencyMs);
+      if (throwable != null) {
+        sendErrors.get(streamId).inc();
+        aggSendErrors.inc();
+        LOG.error("Send message to event hub: {} failed with exception: ", streamId, throwable);
+        sendExceptionOnCallback.compareAndSet(null, throwable);
+      }
+      return aVoid;
+    });
   }
 
   public void start() {
     streamIds.forEach(streamId -> {
-        sendCallbackLatency.put(streamId, new SamzaHistogram(metricsRegistry, streamId, SEND_CALLBACK_LATENCY));
-        sendLatency.put(streamId, new SamzaHistogram(metricsRegistry, streamId, SEND_LATENCY));
-        sendErrors.put(streamId, metricsRegistry.newCounter(streamId, SEND_ERRORS));
-      });
+      sendCallbackLatency.put(streamId, new SamzaHistogram(metricsRegistry, streamId, SEND_CALLBACK_LATENCY));
+      sendLatency.put(streamId, new SamzaHistogram(metricsRegistry, streamId, SEND_LATENCY));
+      sendErrors.put(streamId, metricsRegistry.newCounter(streamId, SEND_ERRORS));
+    });
 
     if (aggSendLatency == null) {
       aggSendLatency = new SamzaHistogram(metricsRegistry, AGGREGATE, SEND_LATENCY);
diff --git a/samza-azure/src/main/java/org/apache/samza/system/eventhub/producer/EventHubSystemProducer.java b/samza-azure/src/main/java/org/apache/samza/system/eventhub/producer/EventHubSystemProducer.java
index f021f36..3912bcf 100644
--- a/samza-azure/src/main/java/org/apache/samza/system/eventhub/producer/EventHubSystemProducer.java
+++ b/samza-azure/src/main/java/org/apache/samza/system/eventhub/producer/EventHubSystemProducer.java
@@ -152,32 +152,32 @@
     if (PartitioningMethod.PARTITION_KEY_AS_PARTITION.equals(partitioningMethod)) {
       // Create all partition senders
       perStreamEventHubClientManagers.forEach((streamId, samzaEventHubClient) -> {
-          EventHubClient ehClient = samzaEventHubClient.getEventHubClient();
+        EventHubClient ehClient = samzaEventHubClient.getEventHubClient();
 
-          try {
-            Map<Integer, PartitionSender> partitionSenders = new HashMap<>();
-            long timeoutMs = config.getRuntimeInfoWaitTimeMS(systemName);
-            Integer numPartitions =
-                ehClient.getRuntimeInformation().get(timeoutMs, TimeUnit.MILLISECONDS).getPartitionCount();
+        try {
+          Map<Integer, PartitionSender> partitionSenders = new HashMap<>();
+          long timeoutMs = config.getRuntimeInfoWaitTimeMS(systemName);
+          Integer numPartitions =
+              ehClient.getRuntimeInformation().get(timeoutMs, TimeUnit.MILLISECONDS).getPartitionCount();
 
-            for (int i = 0; i < numPartitions; i++) {
-              String partitionId = String.valueOf(i);
-              EventHubClientManager perPartitionClientManager =
-                  createOrGetEventHubClientManagerForPartition(streamId, i);
-              PartitionSender partitionSender =
-                  perPartitionClientManager.getEventHubClient().createPartitionSender(partitionId).get(DEFAULT_CREATE_PARTITION_SENDER_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
-              partitionSenders.put(i, partitionSender);
-            }
-
-            streamPartitionSenders.put(streamId, partitionSenders);
-          } catch (InterruptedException | ExecutionException | TimeoutException e) {
-            String msg = "Failed to fetch number of Event Hub partitions for partition sender creation";
-            throw new SamzaException(msg, e);
-          } catch (EventHubException | IllegalArgumentException e) {
-            String msg = "Creation of partition sender failed with exception";
-            throw new SamzaException(msg, e);
+          for (int i = 0; i < numPartitions; i++) {
+            String partitionId = String.valueOf(i);
+            EventHubClientManager perPartitionClientManager =
+                createOrGetEventHubClientManagerForPartition(streamId, i);
+            PartitionSender partitionSender =
+                perPartitionClientManager.getEventHubClient().createPartitionSender(partitionId).get(DEFAULT_CREATE_PARTITION_SENDER_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
+            partitionSenders.put(i, partitionSender);
           }
-        });
+
+          streamPartitionSenders.put(streamId, partitionSenders);
+        } catch (InterruptedException | ExecutionException | TimeoutException e) {
+          String msg = "Failed to fetch number of Event Hub partitions for partition sender creation";
+          throw new SamzaException(msg, e);
+        } catch (EventHubException | IllegalArgumentException e) {
+          String msg = "Creation of partition sender failed with exception";
+          throw new SamzaException(msg, e);
+        }
+      });
     }
     isInitialized = true;
     LOG.info("EventHubSystemProducer initialized.");
@@ -227,10 +227,10 @@
 
     // Initiate metrics
     streamIds.forEach((streamId) -> {
-        eventSkipRate.put(streamId, metricsRegistry.newCounter(streamId, EVENT_SKIP_RATE));
-        eventWriteRate.put(streamId, metricsRegistry.newCounter(streamId, EVENT_WRITE_RATE));
-        eventByteWriteRate.put(streamId, metricsRegistry.newCounter(streamId, EVENT_BYTE_WRITE_RATE));
-      });
+      eventSkipRate.put(streamId, metricsRegistry.newCounter(streamId, EVENT_SKIP_RATE));
+      eventWriteRate.put(streamId, metricsRegistry.newCounter(streamId, EVENT_WRITE_RATE));
+      eventByteWriteRate.put(streamId, metricsRegistry.newCounter(streamId, EVENT_BYTE_WRITE_RATE));
+    });
 
     // Locking to ensure that these aggregated metrics will be created only once across multiple system producers.
     synchronized (AGGREGATE_METRICS_LOCK) {
@@ -365,15 +365,15 @@
   public synchronized void stop() {
     LOG.info("Stopping producer.");
     streamPartitionSenders.values().forEach((streamPartitionSender) -> {
-        List<CompletableFuture<Void>> futures = new ArrayList<>();
-        streamPartitionSender.forEach((key, value) -> futures.add(value.close()));
-        CompletableFuture<Void> future = CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()]));
-        try {
-          future.get(DEFAULT_SHUTDOWN_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
-        } catch (ExecutionException | InterruptedException | TimeoutException e) {
-          LOG.error("Closing the partition sender failed ", e);
-        }
-      });
+      List<CompletableFuture<Void>> futures = new ArrayList<>();
+      streamPartitionSender.forEach((key, value) -> futures.add(value.close()));
+      CompletableFuture<Void> future = CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()]));
+      try {
+        future.get(DEFAULT_SHUTDOWN_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
+      } catch (ExecutionException | InterruptedException | TimeoutException e) {
+        LOG.error("Closing the partition sender failed ", e);
+      }
+    });
     perStreamEventHubClientManagers.values()
         .parallelStream()
         .forEach(ehClient -> ehClient.close(DEFAULT_SHUTDOWN_TIMEOUT_MILLIS));
diff --git a/samza-azure/src/test/java/org/apache/samza/system/azureblob/avro/TestAzureBlobAvroWriter.java b/samza-azure/src/test/java/org/apache/samza/system/azureblob/avro/TestAzureBlobAvroWriter.java
index b4ef4b4..51d2cdc 100644
--- a/samza-azure/src/test/java/org/apache/samza/system/azureblob/avro/TestAzureBlobAvroWriter.java
+++ b/samza-azure/src/test/java/org/apache/samza/system/azureblob/avro/TestAzureBlobAvroWriter.java
@@ -45,8 +45,10 @@
 import org.apache.avro.specific.SpecificDatumWriter;
 import org.apache.avro.specific.SpecificRecord;
 import org.apache.samza.SamzaException;
+import org.apache.samza.config.Config;
 import org.apache.samza.system.OutgoingMessageEnvelope;
 import org.apache.samza.system.SystemStream;
+import org.apache.samza.system.azureblob.utils.BlobMetadataGeneratorFactory;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -61,6 +63,7 @@
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -79,6 +82,9 @@
   private BlockBlobAsyncClient mockBlockBlobAsyncClient;
   private Compression mockCompression;
 
+  private final BlobMetadataGeneratorFactory blobMetadataGeneratorFactory = mock(BlobMetadataGeneratorFactory.class);
+  private final Config blobMetadataGeneratorConfig = mock(Config.class);
+  private static final String STREAM_NAME = "FAKE_STREAM";
   private static final String VALUE = "FAKE_VALUE";
   private static final String SYSTEM_NAME = "FAKE_SYSTEM";
   private static final int THRESHOLD = 100;
@@ -146,8 +152,9 @@
     mockCompression = CompressionFactory.getInstance().getCompression(CompressionType.GZIP);
     azureBlobAvroWriter =
         spy(new AzureBlobAvroWriter(mockContainerAsyncClient, mock(AzureBlobWriterMetrics.class), threadPool, THRESHOLD,
-            60000, "test", mockDataFileWriter, mockAzureBlobOutputStream, mockBlockBlobAsyncClient, Long.MAX_VALUE,
-            Long.MAX_VALUE, mockCompression, false)); // keeping blob size and number of records unlimited
+            60000, "test", mockDataFileWriter, mockAzureBlobOutputStream, mockBlockBlobAsyncClient,
+            blobMetadataGeneratorFactory, blobMetadataGeneratorConfig, STREAM_NAME,
+            Long.MAX_VALUE, Long.MAX_VALUE, mockCompression, false)); // keeping blob size and number of records unlimited
     doReturn(encodedRecord).when(azureBlobAvroWriter).encodeRecord((IndexedRecord) ome.getMessage());
   }
   @Test
@@ -157,6 +164,7 @@
       azureBlobAvroWriter.write(ome);
     }
     verify(mockDataFileWriter, times(numberOfMessages)).appendEncoded(ByteBuffer.wrap(encodedRecord));
+    verify(mockAzureBlobOutputStream, times(numberOfMessages)).incrementNumberOfRecordsInBlob();
   }
 
   @Test
@@ -168,6 +176,7 @@
       azureBlobAvroWriter.write(omeGenericRecord);
     }
     verify(mockDataFileWriter, times(numberOfMessages)).appendEncoded(ByteBuffer.wrap(encodedRecord));
+    verify(mockAzureBlobOutputStream, times(numberOfMessages)).incrementNumberOfRecordsInBlob();
   }
 
   @Test
@@ -180,6 +189,7 @@
     }
     verify(mockDataFileWriter).appendEncoded(ByteBuffer.wrap(encodedRecord));
     verify(mockDataFileWriter, times(numberOfMessages)).appendEncoded(ByteBuffer.wrap((byte[]) omeEncoded.getMessage()));
+    verify(mockAzureBlobOutputStream, times(numberOfMessages + 1)).incrementNumberOfRecordsInBlob(); // +1 to account for first ome which is not encoded
   }
 
   @Test(expected = IllegalStateException.class)
@@ -187,7 +197,8 @@
     azureBlobAvroWriter =
         spy(new AzureBlobAvroWriter(PowerMockito.mock(BlobContainerAsyncClient.class), mock(AzureBlobWriterMetrics.class),
             threadPool, THRESHOLD, 60000, "test",
-            null, null, null, 1000, 100, mockCompression, false));
+            null, null, null, blobMetadataGeneratorFactory, blobMetadataGeneratorConfig, STREAM_NAME,
+            1000, 100, mockCompression, false));
     OutgoingMessageEnvelope omeEncoded = new OutgoingMessageEnvelope(new SystemStream(SYSTEM_NAME, "Topic1"), new byte[100]);
     azureBlobAvroWriter.write(omeEncoded);
   }
@@ -210,7 +221,7 @@
 
     azureBlobAvroWriter.flush();
     azureBlobAvroWriter.close();
-    verify(mockAzureBlobOutputStream).close();
+    verify(mockAzureBlobOutputStream, never()).close();
   }
 
   @Test(expected = RuntimeException.class)
@@ -242,7 +253,8 @@
     BlobContainerAsyncClient mockContainerClient = PowerMockito.mock(BlobContainerAsyncClient.class);
     azureBlobAvroWriter = spy(new AzureBlobAvroWriter(mockContainerClient,
         mockMetrics, threadPool, THRESHOLD, 60000, blobUrlPrefix,
-        null, null, null, maxBlobSize, 10, mockCompression, true));
+        null, null, null, blobMetadataGeneratorFactory, blobMetadataGeneratorConfig, STREAM_NAME,
+        maxBlobSize, 10, mockCompression, true));
 
     DataFileWriter mockDataFileWriter1 = mock(DataFileWriter.class);
     PowerMockito.whenNew(DataFileWriter.class).withAnyArguments().thenReturn(mockDataFileWriter1);
@@ -254,7 +266,7 @@
 
     AzureBlobOutputStream mockAzureBlobOutputStream1 = mock(AzureBlobOutputStream.class);
     PowerMockito.whenNew(AzureBlobOutputStream.class).withArguments(mockBlockBlobAsyncClient1, threadPool,
-        mockMetrics,
+        mockMetrics, blobMetadataGeneratorFactory, blobMetadataGeneratorConfig, STREAM_NAME,
         (long) 60000, THRESHOLD, mockCompression).thenReturn(mockAzureBlobOutputStream1);
     when(mockAzureBlobOutputStream1.getSize()).thenReturn((long) maxBlobSize - 1);
 
@@ -272,7 +284,7 @@
 
     AzureBlobOutputStream mockAzureBlobOutputStream2 = mock(AzureBlobOutputStream.class);
     PowerMockito.whenNew(AzureBlobOutputStream.class).withArguments(mockBlockBlobAsyncClient2, threadPool,
-        mockMetrics,
+        mockMetrics, blobMetadataGeneratorFactory, blobMetadataGeneratorConfig, STREAM_NAME,
         (long) 60000, THRESHOLD, mockCompression).thenReturn(mockAzureBlobOutputStream2);
     when(mockAzureBlobOutputStream2.getSize()).thenReturn((long) maxBlobSize - 1);
 
@@ -282,8 +294,8 @@
     ArgumentCaptor<String> argument = ArgumentCaptor.forClass(String.class);
     verify(mockContainerClient, times(2)).getBlobAsyncClient(argument.capture());
     argument.getAllValues().forEach(blobName -> {
-        Assert.assertTrue(blobName.contains(blobUrlPrefix));
-      });
+      Assert.assertTrue(blobName.contains(blobUrlPrefix));
+    });
     List<String> allBlobNames = argument.getAllValues();
     Assert.assertNotEquals(allBlobNames.get(0), allBlobNames.get(1));
 
@@ -304,7 +316,8 @@
     BlobContainerAsyncClient mockContainerClient = PowerMockito.mock(BlobContainerAsyncClient.class);
     azureBlobAvroWriter = spy(new AzureBlobAvroWriter(mockContainerClient,
         mockMetrics, threadPool, THRESHOLD, 60000, blobUrlPrefix,
-        null, null, null, maxBlobSize, maxRecordsPerBlob, mockCompression, true));
+        null, null, null, blobMetadataGeneratorFactory, blobMetadataGeneratorConfig, STREAM_NAME,
+        maxBlobSize, maxRecordsPerBlob, mockCompression, true));
 
     DataFileWriter mockDataFileWriter1 = mock(DataFileWriter.class);
     PowerMockito.whenNew(DataFileWriter.class).withAnyArguments().thenReturn(mockDataFileWriter1);
@@ -316,7 +329,7 @@
 
     AzureBlobOutputStream mockAzureBlobOutputStream1 = mock(AzureBlobOutputStream.class);
     PowerMockito.whenNew(AzureBlobOutputStream.class).withArguments(mockBlockBlobAsyncClient1, threadPool,
-        mockMetrics,
+        mockMetrics, blobMetadataGeneratorFactory, blobMetadataGeneratorConfig, STREAM_NAME,
         (long) 60000, THRESHOLD, mockCompression).thenReturn(mockAzureBlobOutputStream1);
     when(mockAzureBlobOutputStream1.getSize()).thenReturn((long) 1);
 
@@ -337,7 +350,7 @@
 
     AzureBlobOutputStream mockAzureBlobOutputStream2 = mock(AzureBlobOutputStream.class);
     PowerMockito.whenNew(AzureBlobOutputStream.class).withArguments(mockBlockBlobAsyncClient2, threadPool,
-        mockMetrics,
+        mockMetrics, blobMetadataGeneratorFactory, blobMetadataGeneratorConfig, STREAM_NAME,
         (long) 60000, THRESHOLD, mockCompression).thenReturn(mockAzureBlobOutputStream2);
     when(mockAzureBlobOutputStream2.getSize()).thenReturn((long) 1);
 
@@ -346,8 +359,8 @@
     ArgumentCaptor<String> argument = ArgumentCaptor.forClass(String.class);
     verify(mockContainerClient, times(2)).getBlobAsyncClient(argument.capture());
     argument.getAllValues().forEach(blobName -> {
-        Assert.assertTrue(blobName.contains(blobUrlPrefix));
-      });
+      Assert.assertTrue(blobName.contains(blobUrlPrefix));
+    });
     List<String> allBlobNames = argument.getAllValues();
     Assert.assertNotEquals(allBlobNames.get(0), allBlobNames.get(1));
 
@@ -366,8 +379,8 @@
     BlobContainerAsyncClient mockContainerClient = PowerMockito.mock(BlobContainerAsyncClient.class);
     azureBlobAvroWriter = spy(new AzureBlobAvroWriter(mockContainerClient,
         mock(AzureBlobWriterMetrics.class), threadPool, THRESHOLD, 60000, blobUrlPrefix,
-        mockDataFileWriter, mockAzureBlobOutputStream, mockBlockBlobAsyncClient, maxBlobSize, maxRecordsPerBlob,
-        mockCompression, false));
+        mockDataFileWriter, mockAzureBlobOutputStream, mockBlockBlobAsyncClient, blobMetadataGeneratorFactory, blobMetadataGeneratorConfig, STREAM_NAME,
+        maxBlobSize, maxRecordsPerBlob, mockCompression, false));
 
     DataFileWriter<IndexedRecord> mockDataFileWriter2 = mock(DataFileWriter.class);
     AzureBlobOutputStream mockAzureBlobOutputStream2 = mock(AzureBlobOutputStream.class);
@@ -393,6 +406,7 @@
     azureBlobAvroWriter = spy(new AzureBlobAvroWriter(PowerMockito.mock(BlobContainerAsyncClient.class),
         mock(AzureBlobWriterMetrics.class), threadPool, THRESHOLD,
         60000, "test", mockDataFileWriter, mockAzureBlobOutputStream, mockBlockBlobAsyncClient,
+        blobMetadataGeneratorFactory, blobMetadataGeneratorConfig, STREAM_NAME,
         Long.MAX_VALUE, Long.MAX_VALUE, mockCompression, false));
     IndexedRecord record = new GenericRecordEvent();
     Assert.assertTrue(Arrays.equals(encodeRecord(record), azureBlobAvroWriter.encodeRecord(record)));
@@ -411,6 +425,7 @@
 
     verify(mockDataFileWriter, times(10)).appendEncoded(ByteBuffer.wrap(encodedRecord));
     verify(mockDataFileWriter, times(10)).appendEncoded(ByteBuffer.wrap(encodeRecord((IndexedRecord) ome2.getMessage())));
+    verify(mockAzureBlobOutputStream, times(20)).incrementNumberOfRecordsInBlob();
   }
 
   @Test
@@ -424,6 +439,7 @@
     t2.join(60000);
 
     verify(mockDataFileWriter, times(10)).appendEncoded(ByteBuffer.wrap(encodedRecord));
+    verify(mockAzureBlobOutputStream, times(10)).incrementNumberOfRecordsInBlob();
     verify(mockDataFileWriter).flush();
   }
 
@@ -441,6 +457,7 @@
     verify(mockDataFileWriter, times(10)).appendEncoded(ByteBuffer.wrap(encodedRecord));
     verify(mockDataFileWriter, times(10)).appendEncoded(ByteBuffer.wrap(encodeRecord((IndexedRecord) ome2.getMessage())));
     verify(mockDataFileWriter, times(2)).flush();
+    verify(mockAzureBlobOutputStream, times(20)).incrementNumberOfRecordsInBlob();
   }
 
   @Test
@@ -459,6 +476,7 @@
     verify(mockDataFileWriter, times(10)).appendEncoded(ByteBuffer.wrap(encodeRecord((IndexedRecord) ome2.getMessage())));
     verify(mockDataFileWriter, times(2)).flush();
     verify(mockDataFileWriter).close();
+    verify(mockAzureBlobOutputStream, times(20)).incrementNumberOfRecordsInBlob();
   }
 
   private byte[] encodeRecord(IndexedRecord record) throws Exception {
diff --git a/samza-azure/src/test/java/org/apache/samza/system/azureblob/avro/TestAzureBlobOutputStream.java b/samza-azure/src/test/java/org/apache/samza/system/azureblob/avro/TestAzureBlobOutputStream.java
index d635693..b713ec7 100644
--- a/samza-azure/src/test/java/org/apache/samza/system/azureblob/avro/TestAzureBlobOutputStream.java
+++ b/samza-azure/src/test/java/org/apache/samza/system/azureblob/avro/TestAzureBlobOutputStream.java
@@ -20,6 +20,7 @@
 package org.apache.samza.system.azureblob.avro;
 
 import java.util.Arrays;
+import java.util.HashMap;
 import org.apache.samza.AzureException;
 import org.apache.samza.system.azureblob.compression.Compression;
 import org.apache.samza.system.azureblob.producer.AzureBlobWriterMetrics;
@@ -33,6 +34,10 @@
 import java.util.concurrent.LinkedBlockingDeque;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
+import org.apache.samza.config.Config;
+import org.apache.samza.system.azureblob.utils.BlobMetadataContext;
+import org.apache.samza.system.azureblob.utils.BlobMetadataGenerator;
+import org.apache.samza.system.azureblob.utils.BlobMetadataGeneratorFactory;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -47,7 +52,9 @@
 import static org.mockito.Mockito.anyInt;
 import static org.mockito.Mockito.anyLong;
 import static org.mockito.Mockito.anyMap;
+import static org.mockito.Mockito.anyObject;
 import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.doThrow;
@@ -72,6 +79,12 @@
   private static final byte[] COMPRESSED_BYTES = RANDOM_STRING.substring(0, THRESHOLD / 2).getBytes();
   private AzureBlobWriterMetrics mockMetrics;
   private Compression mockCompression;
+  private static final String FAKE_STREAM = "FAKE_STREAM";
+  private static final String BLOB_RAW_SIZE_BYTES_METADATA = "rawSizeBytes";
+  private static final String BLOB_STREAM_NAME_METADATA = "streamName";
+  private static final String BLOB_RECORD_NUMBER_METADATA = "numberOfRecords";
+  private final BlobMetadataGeneratorFactory blobMetadataGeneratorFactory = mock(BlobMetadataGeneratorFactory.class);
+  private final Config blobMetadataGeneratorConfig = mock(Config.class);
 
   @Before
   public void setup() throws Exception {
@@ -90,12 +103,27 @@
     mockCompression = mock(Compression.class);
     doReturn(COMPRESSED_BYTES).when(mockCompression).compress(BYTES);
 
+    BlobMetadataGenerator mockBlobMetadataGenerator = mock(BlobMetadataGenerator.class);
+    doAnswer(invocation -> {
+      BlobMetadataContext blobMetadataContext = invocation.getArgumentAt(0, BlobMetadataContext.class);
+      String streamName = blobMetadataContext.getStreamName();
+      Long blobSize = blobMetadataContext.getBlobSize();
+      Long numberOfRecords = blobMetadataContext.getNumberOfMessagesInBlob();
+      Map<String, String> metadataProperties = new HashMap<>();
+      metadataProperties.put(BLOB_STREAM_NAME_METADATA, streamName);
+      metadataProperties.put(BLOB_RAW_SIZE_BYTES_METADATA, Long.toString(blobSize));
+      metadataProperties.put(BLOB_RECORD_NUMBER_METADATA, Long.toString(numberOfRecords));
+      return metadataProperties;
+    }).when(mockBlobMetadataGenerator).getBlobMetadata(anyObject());
+
     azureBlobOutputStream = spy(new AzureBlobOutputStream(mockBlobAsyncClient, threadPool, mockMetrics,
+        blobMetadataGeneratorFactory, blobMetadataGeneratorConfig, FAKE_STREAM,
         60000, THRESHOLD, mockByteArrayOutputStream, mockCompression));
 
     doNothing().when(azureBlobOutputStream).commitBlob(any(ArrayList.class), anyMap());
     doNothing().when(azureBlobOutputStream).stageBlock(anyString(), any(ByteBuffer.class), anyInt());
     doNothing().when(azureBlobOutputStream).clearAndMarkClosed();
+    doReturn(mockBlobMetadataGenerator).when(azureBlobOutputStream).getBlobMetadataGenerator();
   }
 
   @Test
@@ -168,8 +196,8 @@
     verify(azureBlobOutputStream).stageBlock(eq(blockIdEncoded(1)), argument.capture(), eq((int) fullBlockCompressedByte.length));
     verify(azureBlobOutputStream).stageBlock(eq(blockIdEncoded(2)), argument2.capture(), eq((int) halfBlockCompressedByte.length));
     argument.getAllValues().forEach(byteBuffer -> {
-        Assert.assertEquals(ByteBuffer.wrap(fullBlockCompressedByte), byteBuffer);
-      });
+      Assert.assertEquals(ByteBuffer.wrap(fullBlockCompressedByte), byteBuffer);
+    });
     Assert.assertEquals(ByteBuffer.wrap(halfBlockCompressedByte), argument2.getAllValues().get(0));
     verify(mockMetrics, times(3)).updateAzureUploadMetrics();
   }
@@ -204,6 +232,7 @@
   @Test
   public void testClose() {
     azureBlobOutputStream.write(BYTES, 0, THRESHOLD);
+    azureBlobOutputStream.incrementNumberOfRecordsInBlob();
     int blockNum = 0;
     String blockId = String.format("%05d", blockNum);
     String blockIdEncoded = Base64.getEncoder().encodeToString(blockId.getBytes());
@@ -216,13 +245,17 @@
     verify(azureBlobOutputStream).commitBlob(blockListArgument.capture(), blobMetadataArg.capture());
     Assert.assertEquals(Arrays.asList(blockIdEncoded), blockListArgument.getAllValues().get(0));
     Map<String, String> blobMetadata = (Map<String, String>) blobMetadataArg.getAllValues().get(0);
-    Assert.assertEquals(blobMetadata.get(AzureBlobOutputStream.BLOB_RAW_SIZE_BYTES_METADATA), Long.toString(THRESHOLD));
+    Assert.assertEquals(blobMetadata.get(BLOB_RAW_SIZE_BYTES_METADATA), Long.toString(THRESHOLD));
+    Assert.assertEquals(blobMetadata.get(BLOB_STREAM_NAME_METADATA), FAKE_STREAM);
+    Assert.assertEquals(blobMetadata.get(BLOB_RECORD_NUMBER_METADATA), Long.toString(1));
   }
 
   @Test
   public void testCloseMultipleBlocks() {
     azureBlobOutputStream.write(BYTES, 0, THRESHOLD);
+    azureBlobOutputStream.incrementNumberOfRecordsInBlob();
     azureBlobOutputStream.write(BYTES, 0, THRESHOLD);
+    azureBlobOutputStream.incrementNumberOfRecordsInBlob();
 
     int blockNum = 0;
     String blockId = String.format("%05d", blockNum);
@@ -239,13 +272,16 @@
     Assert.assertEquals(blockIdEncoded, blockListArgument.getAllValues().get(0).toArray()[0]);
     Assert.assertEquals(blockIdEncoded1, blockListArgument.getAllValues().get(0).toArray()[1]);
     Map<String, String> blobMetadata = (Map<String, String>) blobMetadataArg.getAllValues().get(0);
-    Assert.assertEquals(blobMetadata.get(AzureBlobOutputStream.BLOB_RAW_SIZE_BYTES_METADATA), Long.toString(2 * THRESHOLD));
+    Assert.assertEquals(blobMetadata.get(BLOB_RAW_SIZE_BYTES_METADATA), Long.toString(2 * THRESHOLD));
+    Assert.assertEquals(blobMetadata.get(BLOB_STREAM_NAME_METADATA), FAKE_STREAM);
+    Assert.assertEquals(blobMetadata.get(BLOB_RECORD_NUMBER_METADATA), Long.toString(2));
   }
 
   @Test(expected = AzureException.class)
   public void testCloseFailed() {
 
     azureBlobOutputStream = spy(new AzureBlobOutputStream(mockBlobAsyncClient, threadPool, mockMetrics,
+        blobMetadataGeneratorFactory, blobMetadataGeneratorConfig, FAKE_STREAM,
         60000, THRESHOLD, mockByteArrayOutputStream, mockCompression));
 
     //doNothing().when(azureBlobOutputStream).commitBlob(any(ArrayList.class), anyMap());
@@ -286,6 +322,7 @@
   @Test (expected = AzureException.class)
   public void testFlushFailed() throws IOException {
     azureBlobOutputStream = spy(new AzureBlobOutputStream(mockBlobAsyncClient, threadPool, mockMetrics,
+        blobMetadataGeneratorFactory, blobMetadataGeneratorConfig, FAKE_STREAM,
         60000, THRESHOLD, mockByteArrayOutputStream, mockCompression));
 
     doNothing().when(azureBlobOutputStream).commitBlob(any(ArrayList.class), anyMap());
diff --git a/samza-azure/src/test/java/org/apache/samza/system/azureblob/producer/TestAzureBlobSystemProducer.java b/samza-azure/src/test/java/org/apache/samza/system/azureblob/producer/TestAzureBlobSystemProducer.java
index 2969cd0..acf4cfb 100644
--- a/samza-azure/src/test/java/org/apache/samza/system/azureblob/producer/TestAzureBlobSystemProducer.java
+++ b/samza-azure/src/test/java/org/apache/samza/system/azureblob/producer/TestAzureBlobSystemProducer.java
@@ -45,6 +45,7 @@
 
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.doThrow;
@@ -95,7 +96,7 @@
 
     systemProducer = spy(new AzureBlobSystemProducer(SYSTEM_NAME, azureBlobConfig, mockMetricsRegistry));
     // use mock writer impl
-    doReturn(mockAzureWriter).when(systemProducer).createNewWriter(anyString(), any());
+    setupWriterForProducer(systemProducer, mockAzureWriter, STREAM);
     // bypass Azure connection setup
     doNothing().when(systemProducer).setupAzureContainer(anyString(), anyString());
   }
@@ -415,7 +416,7 @@
     systemProducer.register(source1);
     systemProducer.start();
 
-    doReturn(mockAzureWriter1).when(systemProducer).createNewWriter(anyString(), any());
+    setupWriterForProducer(systemProducer, mockAzureWriter1, stream1);
 
     Thread t1 = sendFlushInThread(source1, ome1, systemProducer, sendsInFirstThread);
     Thread t2 = sendFlushInThread(source1, ome1, systemProducer, sendsInSecondThread);
@@ -451,7 +452,7 @@
     // bypass Azure connection setup
     doNothing().when(systemProducer).setupAzureContainer(anyString(), anyString());
 
-    doReturn(mockAzureWriter1).when(systemProducer).createNewWriter(anyString(), any());
+    setupWriterForProducer(systemProducer, mockAzureWriter1, stream1);
 
     systemProducer.register(source1);
     systemProducer.start();
@@ -493,7 +494,7 @@
     // bypass Azure connection setup
     doNothing().when(systemProducer).setupAzureContainer(anyString(), anyString());
 
-    doReturn(mockAzureWriter1).when(systemProducer).createNewWriter(anyString(), any());
+    setupWriterForProducer(systemProducer, mockAzureWriter1, STREAM);
 
     systemProducer.register(source1);
     systemProducer.start();
@@ -591,4 +592,15 @@
     Config config = new MapConfig(bareConfigs);
     return config;
   }
+
+  private void setupWriterForProducer(AzureBlobSystemProducer azureBlobSystemProducer,
+      AzureBlobWriter mockAzureBlobWriter, String stream) {
+    doAnswer(invocation -> {
+      String blobUrl = invocation.getArgumentAt(0, String.class);
+      String streamName = invocation.getArgumentAt(2, String.class);
+      Assert.assertEquals(stream, streamName);
+      Assert.assertEquals(stream, blobUrl);
+      return mockAzureBlobWriter;
+    }).when(azureBlobSystemProducer).createNewWriter(anyString(), any(), anyString());
+  }
 }
\ No newline at end of file
diff --git a/samza-azure/src/test/java/org/apache/samza/system/azureblob/utils/TestNullBlobMetadataGenerator.java b/samza-azure/src/test/java/org/apache/samza/system/azureblob/utils/TestNullBlobMetadataGenerator.java
new file mode 100644
index 0000000..b7de317
--- /dev/null
+++ b/samza-azure/src/test/java/org/apache/samza/system/azureblob/utils/TestNullBlobMetadataGenerator.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.samza.system.azureblob.utils;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+
+public class TestNullBlobMetadataGenerator {
+  private NullBlobMetadataGenerator nullBlobMetadataGenerator;
+
+  @Before
+  public void setup() {
+    nullBlobMetadataGenerator = new NullBlobMetadataGenerator();
+  }
+
+  @Test
+  public void testGetBlobMetadata() {
+    Assert.assertNull(nullBlobMetadataGenerator.getBlobMetadata(new BlobMetadataContext("fake_stream", 100, 10)));
+  }
+
+  @Test
+  public void testGetBlobMetadataEmptyInput() {
+    Assert.assertNull(nullBlobMetadataGenerator.getBlobMetadata(new BlobMetadataContext("", 0, 0)));
+  }
+
+  @Test
+  public void testGetBlobMetadataNullInput() {
+    Assert.assertNull(nullBlobMetadataGenerator.getBlobMetadata(new BlobMetadataContext(null, 0, 0)));
+  }
+}
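
For reference, a minimal sketch of a non-null metadata generator that surfaces the same three properties stubbed out in the TestAzureBlobOutputStream mock above. It assumes BlobMetadataGenerator is the interface implemented by NullBlobMetadataGenerator, with a getBlobMetadata(BlobMetadataContext) method returning a Map<String, String>; the class name and metadata keys below are illustrative only and are not part of this patch.

package org.apache.samza.system.azureblob.utils;

import java.util.HashMap;
import java.util.Map;

// Sketch only: mirrors what the TestAzureBlobOutputStream mock returns.
public class StreamStatsBlobMetadataGenerator implements BlobMetadataGenerator {
  @Override
  public Map<String, String> getBlobMetadata(BlobMetadataContext blobMetadataContext) {
    Map<String, String> metadataProperties = new HashMap<>();
    // placeholder keys; the real keys live as constants in AzureBlobOutputStream
    metadataProperties.put("streamName", blobMetadataContext.getStreamName());
    metadataProperties.put("rawSizeBytes", Long.toString(blobMetadataContext.getBlobSize()));
    metadataProperties.put("numberOfRecords", Long.toString(blobMetadataContext.getNumberOfMessagesInBlob()));
    return metadataProperties;
  }
}
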
diff --git a/samza-azure/src/test/java/org/apache/samza/system/eventhub/MockEventHubClientManagerFactory.java b/samza-azure/src/test/java/org/apache/samza/system/eventhub/MockEventHubClientManagerFactory.java
index 00bffc3..2d07151 100644
--- a/samza-azure/src/test/java/org/apache/samza/system/eventhub/MockEventHubClientManagerFactory.java
+++ b/samza-azure/src/test/java/org/apache/samza/system/eventhub/MockEventHubClientManagerFactory.java
@@ -98,12 +98,12 @@
       // Consumer mocks
       PartitionReceiver mockPartitionReceiver = PowerMockito.mock(PartitionReceiver.class);
       PowerMockito.when(mockPartitionReceiver.setReceiveHandler(any())).then((Answer<Void>) invocationOnMock -> {
-          PartitionReceiveHandler handler = invocationOnMock.getArgumentAt(0, PartitionReceiveHandler.class);
-          if (handler == null) {
-            Assert.fail("Handler for setReceiverHandler was null");
-          }
-          return null;
-        });
+        PartitionReceiveHandler handler = invocationOnMock.getArgumentAt(0, PartitionReceiveHandler.class);
+        if (handler == null) {
+          Assert.fail("Handler for setReceiverHandler was null");
+        }
+        return null;
+      });
       PartitionRuntimeInformation mockPartitionRuntimeInfo = PowerMockito.mock(PartitionRuntimeInformation.class);
       PowerMockito.when(mockPartitionRuntimeInfo.getLastEnqueuedOffset())
               .thenReturn(EventHubSystemConsumer.START_OF_STREAM);
@@ -114,16 +114,16 @@
       PartitionSender mockPartitionSender1 = PowerMockito.mock(PartitionSender.class);
       PowerMockito.when(mockPartitionSender0.send(any(EventData.class)))
               .then((Answer<CompletableFuture<Void>>) invocationOnMock -> {
-                  EventData data = invocationOnMock.getArgumentAt(0, EventData.class);
-                  receivedData.get(systemName).get(streamName).get(0).add(data);
-                  return new CompletableFuture<>();
-                });
+                EventData data = invocationOnMock.getArgumentAt(0, EventData.class);
+                receivedData.get(systemName).get(streamName).get(0).add(data);
+                return new CompletableFuture<>();
+              });
       PowerMockito.when(mockPartitionSender1.send(any(EventData.class)))
               .then((Answer<CompletableFuture<Void>>) invocationOnMock -> {
-                  EventData data = invocationOnMock.getArgumentAt(0, EventData.class);
-                  receivedData.get(systemName).get(streamName).get(1).add(data);
-                  return new CompletableFuture<>();
-                });
+                EventData data = invocationOnMock.getArgumentAt(0, EventData.class);
+                receivedData.get(systemName).get(streamName).get(1).add(data);
+                return new CompletableFuture<>();
+              });
 
       EventHubRuntimeInformation mockRuntimeInfo = PowerMockito.mock(EventHubRuntimeInformation.class);
       CompletableFuture<EventHubRuntimeInformation> future =  new MockFuture(mockRuntimeInfo);
@@ -133,18 +133,18 @@
         // Consumer calls
         PowerMockito.when(mockEventHubClient.createReceiver(anyString(), anyString(), anyObject()))
               .then((Answer<CompletableFuture<PartitionReceiver>>) invocationOnMock -> {
-                  String partitionId = invocationOnMock.getArgumentAt(1, String.class);
-                  startingOffsets.put(partitionId, EventPosition.fromEndOfStream());
-                  return CompletableFuture.completedFuture(mockPartitionReceiver);
-                });
+                String partitionId = invocationOnMock.getArgumentAt(1, String.class);
+                startingOffsets.put(partitionId, EventPosition.fromEndOfStream());
+                return CompletableFuture.completedFuture(mockPartitionReceiver);
+              });
 
         PowerMockito.when(mockEventHubClient.createReceiver(anyString(), anyString(), anyObject()))
               .then((Answer<CompletableFuture<PartitionReceiver>>) invocationOnMock -> {
-                  String partitionId = invocationOnMock.getArgumentAt(1, String.class);
-                  EventPosition offset = invocationOnMock.getArgumentAt(2, EventPosition.class);
-                  startingOffsets.put(partitionId, offset);
-                  return CompletableFuture.completedFuture(mockPartitionReceiver);
-                });
+                String partitionId = invocationOnMock.getArgumentAt(1, String.class);
+                EventPosition offset = invocationOnMock.getArgumentAt(2, EventPosition.class);
+                startingOffsets.put(partitionId, offset);
+                return CompletableFuture.completedFuture(mockPartitionReceiver);
+              });
 
         PowerMockito.when(mockEventHubClient.getPartitionRuntimeInformation(anyString())).thenReturn(partitionFuture);
 
@@ -156,12 +156,12 @@
 
         PowerMockito.when(mockEventHubClient.send(any(EventData.class), anyString()))
                 .then((Answer<CompletableFuture<Void>>) invocationOnMock -> {
-                    EventData data = invocationOnMock.getArgumentAt(0, EventData.class);
-                    String key = invocationOnMock.getArgumentAt(1, String.class);
-                    Integer intKey = Integer.valueOf(key);
-                    receivedData.get(systemName).get(streamName).get(intKey % 2).add(data);
-                    return new CompletableFuture<>();
-                  });
+                  EventData data = invocationOnMock.getArgumentAt(0, EventData.class);
+                  String key = invocationOnMock.getArgumentAt(1, String.class);
+                  Integer intKey = Integer.valueOf(key);
+                  receivedData.get(systemName).get(streamName).get(intKey % 2).add(data);
+                  return new CompletableFuture<>();
+                });
       } catch (Exception e) {
         Assert.fail("Failed to create create mock methods for EventHubClient");
       }
diff --git a/samza-azure/src/test/java/org/apache/samza/system/eventhub/admin/TestEventHubSystemAdmin.java b/samza-azure/src/test/java/org/apache/samza/system/eventhub/admin/TestEventHubSystemAdmin.java
index befbf3a..f0b3a9c 100644
--- a/samza-azure/src/test/java/org/apache/samza/system/eventhub/admin/TestEventHubSystemAdmin.java
+++ b/samza-azure/src/test/java/org/apache/samza/system/eventhub/admin/TestEventHubSystemAdmin.java
@@ -86,11 +86,11 @@
       Assert.assertTrue(partitionMetadataMap.size() >= MIN_EVENTHUB_ENTITY_PARTITION);
       Assert.assertTrue(partitionMetadataMap.size() <= MAX_EVENTHUB_ENTITY_PARTITION);
       partitionMetadataMap.forEach((partition, metadata) -> {
-          Assert.assertEquals(EventHubSystemConsumer.START_OF_STREAM, metadata.getOldestOffset());
-          Assert.assertNotSame(EventHubSystemConsumer.END_OF_STREAM, metadata.getNewestOffset());
-          String expectedUpcomingOffset = String.valueOf(Long.parseLong(metadata.getNewestOffset()) + 1);
-          Assert.assertEquals(expectedUpcomingOffset, metadata.getUpcomingOffset());
-        });
+        Assert.assertEquals(EventHubSystemConsumer.START_OF_STREAM, metadata.getOldestOffset());
+        Assert.assertNotSame(EventHubSystemConsumer.END_OF_STREAM, metadata.getNewestOffset());
+        String expectedUpcomingOffset = String.valueOf(Long.parseLong(metadata.getNewestOffset()) + 1);
+        Assert.assertEquals(expectedUpcomingOffset, metadata.getUpcomingOffset());
+      });
     }
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/classloader/IsolatingClassLoaderFactory.java b/samza-core/src/main/java/org/apache/samza/classloader/IsolatingClassLoaderFactory.java
index 19e776e..344a034 100644
--- a/samza-core/src/main/java/org/apache/samza/classloader/IsolatingClassLoaderFactory.java
+++ b/samza-core/src/main/java/org/apache/samza/classloader/IsolatingClassLoaderFactory.java
@@ -244,7 +244,7 @@
     apiParentRelationshipBuilder.addDelegatePreferredClassPredicate(new BootstrapClassPredicate());
     // the classes which are Samza framework API classes are added here
     getFrameworkApiClassGlobs(apiLibDirectory).forEach(
-        apiClassName -> apiParentRelationshipBuilder.addDelegatePreferredClassPredicate(new GlobMatcher(apiClassName)));
+      apiClassName -> apiParentRelationshipBuilder.addDelegatePreferredClassPredicate(new GlobMatcher(apiClassName)));
     return apiParentRelationshipBuilder.build();
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/clustermanager/ClusterBasedJobCoordinator.java b/samza-core/src/main/java/org/apache/samza/clustermanager/ClusterBasedJobCoordinator.java
index 0cb80b2..8482a3b 100644
--- a/samza-core/src/main/java/org/apache/samza/clustermanager/ClusterBasedJobCoordinator.java
+++ b/samza-core/src/main/java/org/apache/samza/clustermanager/ClusterBasedJobCoordinator.java
@@ -20,7 +20,6 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import java.io.IOException;
-import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -71,6 +70,7 @@
 import org.apache.samza.util.ConfigUtil;
 import org.apache.samza.util.CoordinatorStreamUtil;
 import org.apache.samza.util.DiagnosticsUtil;
+import org.apache.samza.util.SplitDeploymentUtil;
 import org.apache.samza.util.SystemClock;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -382,13 +382,13 @@
     return Optional.of(new StreamPartitionCountMonitor(inputStreamsToMonitor, streamMetadata, metrics,
         new JobConfig(config).getMonitorPartitionChangeFrequency(), streamsChanged -> {
       // Fail the jobs with durable state store. Otherwise, application state.status remains UNDEFINED s.t. YARN job will be restarted
-        if (hasDurableStores) {
-          LOG.error("Input topic partition count changed in a job with durable state. Failing the job. " +
-              "Changed topics: {}", streamsChanged.toString());
-          state.status = SamzaApplicationState.SamzaAppStatus.FAILED;
-        }
-        coordinatorException = new PartitionChangeException("Input topic partition count changes detected for topics: " + streamsChanged.toString());
-      }));
+      if (hasDurableStores) {
+        LOG.error("Input topic partition count changed in a job with durable state. Failing the job. " +
+            "Changed topics: {}", streamsChanged.toString());
+        state.status = SamzaApplicationState.SamzaAppStatus.FAILED;
+      }
+      coordinatorException = new PartitionChangeException("Input topic partition count changes detected for topics: " + streamsChanged.toString());
+    }));
   }
 
   private Optional<StreamRegexMonitor> getInputRegexMonitor(Config config, SystemAdmins systemAdmins, Set<SystemStream> inputStreamsToMonitor) {
@@ -464,78 +464,21 @@
    * The entry point for the {@link ClusterBasedJobCoordinator}.
    */
   public static void main(String[] args) {
-    boolean dependencyIsolationEnabled = Boolean.parseBoolean(
-        System.getenv(ShellCommandConfig.ENV_CLUSTER_BASED_JOB_COORDINATOR_DEPENDENCY_ISOLATION_ENABLED));
     Thread.setDefaultUncaughtExceptionHandler((thread, exception) -> {
-        LOG.error("Uncaught exception in ClusterBasedJobCoordinator::main. Exiting job coordinator", exception);
-        System.exit(1);
-      });
-    if (!dependencyIsolationEnabled) {
+      LOG.error("Uncaught exception in ClusterBasedJobCoordinator::main. Exiting job coordinator", exception);
+      System.exit(1);
+    });
+    if (!SplitDeploymentUtil.isSplitDeploymentEnabled()) {
       // no isolation enabled, so can just execute runClusterBasedJobCoordinator directly
       runClusterBasedJobCoordinator(args);
     } else {
-      runWithClassLoader(new IsolatingClassLoaderFactory().buildClassLoader(), args);
+      SplitDeploymentUtil.runWithClassLoader(new IsolatingClassLoaderFactory().buildClassLoader(),
+          ClusterBasedJobCoordinator.class, "runClusterBasedJobCoordinator", args);
     }
     System.exit(0);
   }
 
   /**
-   * Execute the coordinator using a separate isolated classloader.
-   * @param classLoader {@link ClassLoader} to use to load the {@link ClusterBasedJobCoordinator} which will run
-   * @param args arguments to pass when running the {@link ClusterBasedJobCoordinator}
-   */
-  @VisibleForTesting
-  static void runWithClassLoader(ClassLoader classLoader, String[] args) {
-    // need to use the isolated classloader to load ClusterBasedJobCoordinator and then run using that new class
-    Class<?> clusterBasedJobCoordinatorClass;
-    try {
-      clusterBasedJobCoordinatorClass = classLoader.loadClass(ClusterBasedJobCoordinator.class.getName());
-    } catch (ClassNotFoundException e) {
-      throw new SamzaException(
-          "Isolation was enabled, but unable to find ClusterBasedJobCoordinator in isolated classloader", e);
-    }
-
-    // save the current context classloader so it can be reset after finishing the call to runClusterBasedJobCoordinator
-    ClassLoader previousContextClassLoader = Thread.currentThread().getContextClassLoader();
-    // this is needed because certain libraries (e.g. log4j) use the context classloader
-    Thread.currentThread().setContextClassLoader(classLoader);
-
-    try {
-      executeRunClusterBasedJobCoordinatorForClass(clusterBasedJobCoordinatorClass, args);
-    } finally {
-      // reset the context class loader; it's good practice, and could be important when running a test suite
-      Thread.currentThread().setContextClassLoader(previousContextClassLoader);
-    }
-  }
-
-  /**
-   * Runs the {@link ClusterBasedJobCoordinator#runClusterBasedJobCoordinator(String[])} method of the given
-   * {@code clusterBasedJobCoordinatorClass} using reflection.
-   * @param clusterBasedJobCoordinatorClass {@link ClusterBasedJobCoordinator} {@link Class} for which to execute
-   * {@link ClusterBasedJobCoordinator#runClusterBasedJobCoordinator(String[])}
-   * @param args arguments to pass to {@link ClusterBasedJobCoordinator#runClusterBasedJobCoordinator(String[])}
-   */
-  private static void executeRunClusterBasedJobCoordinatorForClass(Class<?> clusterBasedJobCoordinatorClass,
-      String[] args) {
-    Method runClusterBasedJobCoordinatorMethod;
-    try {
-      runClusterBasedJobCoordinatorMethod =
-          clusterBasedJobCoordinatorClass.getDeclaredMethod("runClusterBasedJobCoordinator", String[].class);
-    } catch (NoSuchMethodException e) {
-      throw new SamzaException("Isolation was enabled, but unable to find runClusterBasedJobCoordinator method", e);
-    }
-    // only sets accessible flag for this Method instance, not other Method instances for runClusterBasedJobCoordinator
-    runClusterBasedJobCoordinatorMethod.setAccessible(true);
-
-    try {
-      // wrapping args in object array so that args is passed as a single argument to the method
-      runClusterBasedJobCoordinatorMethod.invoke(null, new Object[]{args});
-    } catch (IllegalAccessException | InvocationTargetException e) {
-      throw new SamzaException("Exception while executing runClusterBasedJobCoordinator method", e);
-    }
-  }
-
-  /**
    * This is the actual execution for the {@link ClusterBasedJobCoordinator}. This is separated out from
    * {@link #main(String[])} so that it can be executed directly or from a separate classloader.
    */
@@ -636,21 +579,21 @@
     List<String> args = new ArrayList<>(config.size() * 2);
 
     config.forEach((key, value) -> {
-        if (key.equals(ApplicationConfig.APP_MAIN_ARGS)) {
-          /*
-           * Converts native beam pipeline options such as
-           * --runner=SamzaRunner --maxSourceParallelism=1024
-           */
-          args.addAll(Arrays.asList(value.split("\\s")));
-        } else {
-          /*
-           * Converts native Samza configs to config override format such as
-           * --config job.name=test
-           */
-          args.add("--config");
-          args.add(String.format("%s=%s", key, value));
-        }
-      });
+      if (key.equals(ApplicationConfig.APP_MAIN_ARGS)) {
+        /*
+         * Converts native beam pipeline options such as
+         * --runner=SamzaRunner --maxSourceParallelism=1024
+         */
+        args.addAll(Arrays.asList(value.split("\\s")));
+      } else {
+        /*
+         * Converts native Samza configs to config override format such as
+         * --config job.name=test
+         */
+        args.add("--config");
+        args.add(String.format("%s=%s", key, value));
+      }
+    });
 
     return args.toArray(new String[0]);
   }
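
The reflection helpers removed above are consolidated into the new SplitDeploymentUtil. Below is a minimal sketch of what that utility could look like, assuming it simply generalizes the removed code to accept the target class and method name; the actual class introduced by this patch may differ in its details.

package org.apache.samza.util;

import java.lang.reflect.Method;
import org.apache.samza.SamzaException;
import org.apache.samza.config.ShellCommandConfig;

public class SplitDeploymentUtil {

  // Reads the environment flag that replaces the old dependency-isolation variable.
  public static boolean isSplitDeploymentEnabled() {
    return Boolean.parseBoolean(System.getenv(ShellCommandConfig.ENV_SPLIT_DEPLOYMENT_ENABLED));
  }

  public static void runWithClassLoader(ClassLoader isolatedClassLoader, Class<?> targetClass,
      String methodName, String[] args) {
    try {
      // load the target class through the isolated classloader and look up the named method
      Class<?> isolatedClass = isolatedClassLoader.loadClass(targetClass.getName());
      Method method = isolatedClass.getDeclaredMethod(methodName, String[].class);
      method.setAccessible(true);

      // some libraries (e.g. log4j) use the context classloader, so swap it for the duration of the call
      ClassLoader previousContextClassLoader = Thread.currentThread().getContextClassLoader();
      Thread.currentThread().setContextClassLoader(isolatedClassLoader);
      try {
        // wrap args in an Object[] so the String[] is passed as a single argument
        method.invoke(null, new Object[]{args});
      } finally {
        Thread.currentThread().setContextClassLoader(previousContextClassLoader);
      }
    } catch (ReflectiveOperationException e) {
      throw new SamzaException("Split deployment was enabled, but failed to run " + methodName, e);
    }
  }
}
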
diff --git a/samza-core/src/main/java/org/apache/samza/clustermanager/ClusterResourceManager.java b/samza-core/src/main/java/org/apache/samza/clustermanager/ClusterResourceManager.java
index 8ea3c30..43b3d36 100644
--- a/samza-core/src/main/java/org/apache/samza/clustermanager/ClusterResourceManager.java
+++ b/samza-core/src/main/java/org/apache/samza/clustermanager/ClusterResourceManager.java
@@ -180,6 +180,13 @@
      */
     void onStreamProcessorLaunchFailure(SamzaResource resource, Throwable t);
 
+    /**
+     * Callback invoked when there is a failure in stopping a processor on the provided {@link SamzaResource}.
+     * @param resource the resource on which the processor was running
+     * @param t the error in stopping the processor
+     */
+    void onStreamProcessorStopFailure(SamzaResource resource, Throwable t);
+
     /***
      * This callback is invoked when there is an error in the ClusterResourceManager. This is
      * guaranteed to be invoked when there is an uncaught exception in any other
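
Because onStreamProcessorStopFailure is added to the Callback interface without a default body, existing Callback implementations outside this patch must now provide it. A minimal no-op addition to such an implementation could look like the following (sketch only):

  @Override
  public void onStreamProcessorStopFailure(SamzaResource resource, Throwable t) {
    // No-op: this implementation does not react to failed stop requests.
  }
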
diff --git a/samza-core/src/main/java/org/apache/samza/clustermanager/ContainerManager.java b/samza-core/src/main/java/org/apache/samza/clustermanager/ContainerManager.java
index b9427b3..70a050c 100644
--- a/samza-core/src/main/java/org/apache/samza/clustermanager/ContainerManager.java
+++ b/samza-core/src/main/java/org/apache/samza/clustermanager/ContainerManager.java
@@ -134,6 +134,12 @@
       } else if (actionStatus == ContainerPlacementMetadata.ContainerStatus.STOP_IN_PROGRESS) {
         LOG.info("Waiting for running container to shutdown due to existing ContainerPlacement action {}", actionMetaData);
         return false;
+      } else if (actionStatus == ContainerPlacementMetadata.ContainerStatus.STOP_FAILED) {
+        LOG.info("Shutdown on running container failed for action {}", actionMetaData);
+        markContainerPlacementActionFailed(actionMetaData,
+            String.format("failed to stop container on current host %s", actionMetaData.getSourceHost()));
+        resourceRequestState.cancelResourceRequest(request);
+        return true;
       } else if (actionStatus == ContainerPlacementMetadata.ContainerStatus.STOPPED) {
         // If the job has standby containers enabled, always check standby constraints before issuing a start on container
         // Note: Always check constraints against allocated resource, since preferred host can be ANY_HOST as well
@@ -234,6 +240,29 @@
   }
 
   /**
+   * Handle a container stop failure for active containers and standby containers (if enabled).
+   * @param processorId logical id of the container, e.g. 1, 2, 3
+   * @param containerId last known id of the container deployed
+   * @param containerHost host on which the container is running
+   * @param containerAllocator allocator for requesting resources
+   * TODO: SAMZA-2512 Add integration test for handleContainerStopFail
+   */
+  void handleContainerStopFail(String processorId, String containerId, String containerHost,
+      ContainerAllocator containerAllocator) {
+    if (processorId != null && hasActiveContainerPlacementAction(processorId)) {
+      // Assuming the resource acquired on the destination host will be relinquished by the containerAllocator,
+      // we mark the placement action as failed and return.
+      ContainerPlacementMetadata metaData = getPlacementActionMetadata(processorId).get();
+      metaData.setContainerStatus(ContainerPlacementMetadata.ContainerStatus.STOP_FAILED);
+    } else if (processorId != null && standbyContainerManager.isPresent()) {
+      standbyContainerManager.get().handleContainerStopFail(processorId, containerId, containerAllocator);
+    } else {
+      LOG.warn("Did not find a running Processor ID for Container ID: {} on host: {}. "
+          + "Ignoring invalid/redundant notification.", containerId, containerHost);
+    }
+  }
+
+  /**
    * Handles the state update on successful launch of a container, if this launch is due to a container placement action updates the
    * related metadata to report success
    *
diff --git a/samza-core/src/main/java/org/apache/samza/clustermanager/ContainerProcessManager.java b/samza-core/src/main/java/org/apache/samza/clustermanager/ContainerProcessManager.java
index d3962ab..f6e3b1f 100644
--- a/samza-core/src/main/java/org/apache/samza/clustermanager/ContainerProcessManager.java
+++ b/samza-core/src/main/java/org/apache/samza/clustermanager/ContainerProcessManager.java
@@ -25,6 +25,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import org.apache.commons.lang3.tuple.ImmutablePair;
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.samza.SamzaException;
 import org.apache.samza.clustermanager.container.placement.ContainerPlacementMetadataStore;
@@ -186,8 +187,8 @@
     this.containerManager = containerManager;
     this.diagnosticsManager = Option.empty();
     this.containerAllocator = allocator.orElseGet(
-        () -> new ContainerAllocator(this.clusterResourceManager, clusterManagerConfig, state,
-            hostAffinityEnabled, this.containerManager));
+      () -> new ContainerAllocator(this.clusterResourceManager, clusterManagerConfig, state,
+          hostAffinityEnabled, this.containerManager));
     this.allocatorThread = new Thread(this.containerAllocator, "Container Allocator Thread");
     LOG.info("Finished container process manager initialization");
   }
@@ -299,17 +300,10 @@
    */
   public void onResourceCompleted(SamzaResourceStatus resourceStatus) {
     String containerId = resourceStatus.getContainerId();
-    String processorId = null;
-    String hostName = null;
-    for (Map.Entry<String, SamzaResource> entry: state.runningProcessors.entrySet()) {
-      if (entry.getValue().getContainerId().equals(resourceStatus.getContainerId())) {
-        LOG.info("Container ID: {} matched running Processor ID: {} on host: {}", containerId, entry.getKey(), entry.getValue().getHost());
+    Pair<String, String> runningProcessorIdHostname = getRunningProcessor(containerId);
+    String processorId = runningProcessorIdHostname.getKey();
+    String hostName = runningProcessorIdHostname.getValue();
 
-        processorId = entry.getKey();
-        hostName = entry.getValue().getHost();
-        break;
-      }
-    }
     if (processorId == null) {
       LOG.info("No running Processor ID found for Container ID: {} with Status: {}. Ignoring redundant notification.", containerId, resourceStatus.toString());
       state.redundantNotifications.incrementAndGet();
@@ -431,6 +425,18 @@
     containerManager.handleContainerLaunchFail(processorId, containerId, containerHost, containerAllocator);
   }
 
+  @Override
+  public void onStreamProcessorStopFailure(SamzaResource resource, Throwable t) {
+    String containerId = resource.getContainerId();
+    String containerHost = resource.getHost();
+    String processorId = getRunningProcessor(containerId).getKey();
+    LOG.warn("Stop failed for running Processor ID: {} on Container ID: {} on host: {} with exception: {}",
+        processorId, containerId, containerHost, t);
+
+    // Notify container-manager of the failed container-stop request
+    containerManager.handleContainerStopFail(processorId, containerId, containerHost, containerAllocator);
+  }
+
   /**
    * An error in the callback terminates the JobCoordinator
    * @param e the underlying exception/error
@@ -623,6 +629,20 @@
     return null;
   }
 
+  private Pair<String, String> getRunningProcessor(String containerId) {
+    for (Map.Entry<String, SamzaResource> entry: state.runningProcessors.entrySet()) {
+      if (entry.getValue().getContainerId().equals(containerId)) {
+        LOG.info("Container ID: {} matched running Processor ID: {} on host: {}", containerId, entry.getKey(), entry.getValue().getHost());
+
+        String processorId = entry.getKey();
+        String hostName = entry.getValue().getHost();
+        return new ImmutablePair<>(processorId, hostName);
+      }
+    }
+
+    return new ImmutablePair<>(null, null);
+  }
+
   /**
    * Request {@link ContainerManager#handleContainerStop} to determine next step of actions for the stopped container
    */
diff --git a/samza-core/src/main/java/org/apache/samza/clustermanager/JobCoordinatorLaunchUtil.java b/samza-core/src/main/java/org/apache/samza/clustermanager/JobCoordinatorLaunchUtil.java
index fc1d34e..63a1f5c 100644
--- a/samza-core/src/main/java/org/apache/samza/clustermanager/JobCoordinatorLaunchUtil.java
+++ b/samza-core/src/main/java/org/apache/samza/clustermanager/JobCoordinatorLaunchUtil.java
@@ -26,6 +26,7 @@
 import org.apache.samza.application.descriptors.ApplicationDescriptorUtil;
 import org.apache.samza.config.Config;
 import org.apache.samza.config.JobConfig;
+import org.apache.samza.config.MapConfig;
 import org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore;
 import org.apache.samza.execution.RemoteJobPlanner;
 import org.apache.samza.metadatastore.MetadataStore;
@@ -57,18 +58,23 @@
       throw new SamzaException("Only support single remote job is supported.");
     }
 
-    Config finalConfig = jobConfigs.get(0);
-
-    // This needs to be consistent with RemoteApplicationRunner#run where JobRunner#submit to be called instead of JobRunner#run
-    CoordinatorStreamUtil.writeConfigToCoordinatorStream(finalConfig, true);
-    DiagnosticsUtil.createDiagnosticsStream(finalConfig);
+    Config fullConfig = jobConfigs.get(0);
+    // Create the coordinator stream if it does not exist before fetching the launch config from it.
+    CoordinatorStreamUtil.createCoordinatorStream(fullConfig);
     MetricsRegistryMap metrics = new MetricsRegistryMap();
     MetadataStore
-        metadataStore = new CoordinatorStreamStore(CoordinatorStreamUtil.buildCoordinatorStreamConfig(finalConfig), metrics);
+        metadataStore = new CoordinatorStreamStore(CoordinatorStreamUtil.buildCoordinatorStreamConfig(fullConfig), metrics);
     // MetadataStore will be closed in ClusterBasedJobCoordinator#onShutDown
     // initialization of MetadataStore can be moved to ClusterBasedJobCoordinator after we clean up
     // ClusterBasedJobCoordinator#createFromMetadataStore
     metadataStore.init();
+    // Reads extra launch config from metadata store.
+    Config launchConfig = CoordinatorStreamUtil.readLaunchConfigFromCoordinatorStream(fullConfig, metadataStore);
+    Config finalConfig = new MapConfig(launchConfig, fullConfig);
+
+    // This needs to be consistent with RemoteApplicationRunner#run, where JobRunner#submit should be called instead of JobRunner#run
+    CoordinatorStreamUtil.writeConfigToCoordinatorStream(finalConfig, true);
+    DiagnosticsUtil.createDiagnosticsStream(finalConfig);
 
     ClusterBasedJobCoordinator jc = new ClusterBasedJobCoordinator(
         metrics,
diff --git a/samza-core/src/main/java/org/apache/samza/clustermanager/ResourceRequestState.java b/samza-core/src/main/java/org/apache/samza/clustermanager/ResourceRequestState.java
index 2e4bcdb..69646b7 100644
--- a/samza-core/src/main/java/org/apache/samza/clustermanager/ResourceRequestState.java
+++ b/samza-core/src/main/java/org/apache/samza/clustermanager/ResourceRequestState.java
@@ -258,10 +258,10 @@
 
     synchronized (lock) {
       allocatedResources.values().forEach(resources -> {
-          if (resources != null) {
-            resources.removeIf(r -> containerId.equals(r.getContainerId()));
-          }
-        });
+        if (resources != null) {
+          resources.removeIf(r -> containerId.equals(r.getContainerId()));
+        }
+      });
     }
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/clustermanager/StandbyContainerManager.java b/samza-core/src/main/java/org/apache/samza/clustermanager/StandbyContainerManager.java
index a9d298d..30d0de9 100644
--- a/samza-core/src/main/java/org/apache/samza/clustermanager/StandbyContainerManager.java
+++ b/samza-core/src/main/java/org/apache/samza/clustermanager/StandbyContainerManager.java
@@ -127,6 +127,31 @@
   }
 
   /**
+   *  Handle a failed stop for a container:
+   *  Case 1. If it is a standby container, continue the failover.
+   *  Case 2. If it is an active container, this is an invalid state, so throw an exception to alarm/restart.
+   * @param containerID the ID (e.g., 0, 1, 2) of the container whose stop failed
+   * @param resourceID id of the resource used for the failed container
+   */
+  public void handleContainerStopFail(String containerID, String resourceID,
+      ContainerAllocator containerAllocator) {
+    if (StandbyTaskUtil.isStandbyContainer(containerID)) {
+      log.info("Handling stop fail for standby-container {}, continuing the failover (if present)", containerID);
+
+      // if this standbyContainerResource was stopped for a failover, we will find a metadata entry
+      Optional<StandbyContainerManager.FailoverMetadata> failoverMetadata = this.checkIfUsedForFailover(resourceID);
+
+      // if we find a metadata entry, we continue with the failover (select another standby or any-host appropriately)
+      failoverMetadata.ifPresent(
+        metadata -> initiateStandbyAwareAllocation(metadata.activeContainerID, metadata.activeContainerResourceID,
+            containerAllocator));
+    } else {
+      // If this class receives a callback for stop-fail on an active container, throw an exception
+      throw new SamzaException("Invalid State. Received stop container fail for container Id: " + containerID);
+    }
+  }
+
+  /**
    *  If a standby container has stopped, then there are two possible cases
    *    Case 1. during a failover, the standby container was stopped for an active's start, then we
    *       1. request a resource on the standby's host to place the activeContainer, and
@@ -191,10 +216,10 @@
 
       Map<String, SamzaResource> runningStandbyContainersOnHost = new HashMap<>();
       this.samzaApplicationState.runningProcessors.forEach((samzaContainerId, samzaResource) -> {
-          if (standbySamzaContainerIds.contains(samzaContainerId) && samzaResource.getHost().equals(standbyHost)) {
-            runningStandbyContainersOnHost.put(samzaContainerId, samzaResource);
-          }
-        });
+        if (standbySamzaContainerIds.contains(samzaContainerId) && samzaResource.getHost().equals(standbyHost)) {
+          runningStandbyContainersOnHost.put(samzaContainerId, samzaResource);
+        }
+      });
 
       if (runningStandbyContainersOnHost.isEmpty()) {
         // if there are no running standby-containers on the standbyHost, we proceed to directly make a resource request
@@ -214,13 +239,13 @@
         FailoverMetadata failoverMetadata = this.registerActiveContainerFailure(activeContainerID, resourceID);
 
         runningStandbyContainersOnHost.forEach((standbyContainerID, standbyResource) -> {
-            log.info("Initiating failover and stopping standby container, found standbyContainer {} = resource {}, "
-                    + "for active container {}", runningStandbyContainersOnHost.keySet(),
-                runningStandbyContainersOnHost.values(), activeContainerID);
-            failoverMetadata.updateStandbyContainer(standbyResource.getContainerId(), standbyResource.getHost());
-            samzaApplicationState.failoversToStandby.incrementAndGet();
-            this.clusterResourceManager.stopStreamProcessor(standbyResource);
-          });
+          log.info("Initiating failover and stopping standby container, found standbyContainer {} = resource {}, "
+                  + "for active container {}", runningStandbyContainersOnHost.keySet(),
+              runningStandbyContainersOnHost.values(), activeContainerID);
+          failoverMetadata.updateStandbyContainer(standbyResource.getContainerId(), standbyResource.getHost());
+          samzaApplicationState.failoversToStandby.incrementAndGet();
+          this.clusterResourceManager.stopStreamProcessor(standbyResource);
+        });
 
         // if multiple standbys are on the same host, we are in an invalid state, so we fail the deploy and retry
         if (runningStandbyContainersOnHost.size() > 1) {
diff --git a/samza-core/src/main/java/org/apache/samza/clustermanager/container/placement/ContainerPlacementMetadata.java b/samza-core/src/main/java/org/apache/samza/clustermanager/container/placement/ContainerPlacementMetadata.java
index 15c9e1c..0f415d6 100644
--- a/samza-core/src/main/java/org/apache/samza/clustermanager/container/placement/ContainerPlacementMetadata.java
+++ b/samza-core/src/main/java/org/apache/samza/clustermanager/container/placement/ContainerPlacementMetadata.java
@@ -40,7 +40,7 @@
   /**
    * State to track container failover
    */
-  public enum ContainerStatus { RUNNING, STOP_IN_PROGRESS, STOPPED }
+  public enum ContainerStatus { RUNNING, STOP_IN_PROGRESS, STOP_FAILED, STOPPED }
   // Container Placement request message
   private final ContainerPlacementRequestMessage requestMessage;
   // Host where the container is actively running
diff --git a/samza-core/src/main/java/org/apache/samza/clustermanager/container/placement/ContainerPlacementRequestAllocator.java b/samza-core/src/main/java/org/apache/samza/clustermanager/container/placement/ContainerPlacementRequestAllocator.java
index 5161cfb..7eb6175 100644
--- a/samza-core/src/main/java/org/apache/samza/clustermanager/container/placement/ContainerPlacementRequestAllocator.java
+++ b/samza-core/src/main/java/org/apache/samza/clustermanager/container/placement/ContainerPlacementRequestAllocator.java
@@ -18,6 +18,7 @@
  */
 package org.apache.samza.clustermanager.container.placement;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.samza.clustermanager.ContainerProcessManager;
 import org.apache.samza.config.ApplicationConfig;
@@ -50,7 +51,10 @@
    * RunId of the app
    */
   private final String appRunId;
-
+  /**
+   * Sleep time for container placement handler thread
+   */
+  private final int containerPlacementHandlerSleepMs;
   public ContainerPlacementRequestAllocator(ContainerPlacementMetadataStore containerPlacementMetadataStore, ContainerProcessManager manager, ApplicationConfig config) {
     Preconditions.checkNotNull(containerPlacementMetadataStore, "containerPlacementMetadataStore cannot be null");
     Preconditions.checkNotNull(manager, "ContainerProcessManager cannot be null");
@@ -58,6 +62,22 @@
     this.containerPlacementMetadataStore = containerPlacementMetadataStore;
     this.isRunning = true;
     this.appRunId = config.getRunId();
+    this.containerPlacementHandlerSleepMs = DEFAULT_CLUSTER_MANAGER_CONTAINER_PLACEMENT_HANDLER_SLEEP_MS;
+  }
+
+  /**
+   * Should only be used for testing; cannot be made package-private because the end-to-end integration test
+   * needs package-private methods, which live in org.apache.samza.clustermanager
+   */
+  @VisibleForTesting
+  public ContainerPlacementRequestAllocator(ContainerPlacementMetadataStore containerPlacementMetadataStore, ContainerProcessManager manager, ApplicationConfig config, int containerPlacementHandlerSleepMs) {
+    Preconditions.checkNotNull(containerPlacementMetadataStore, "containerPlacementMetadataStore cannot be null");
+    Preconditions.checkNotNull(manager, "ContainerProcessManager cannot be null");
+    this.containerProcessManager = manager;
+    this.containerPlacementMetadataStore = containerPlacementMetadataStore;
+    this.isRunning = true;
+    this.appRunId = config.getRunId();
+    this.containerPlacementHandlerSleepMs = containerPlacementHandlerSleepMs;
   }
 
   @Override
@@ -75,7 +95,7 @@
             containerPlacementMetadataStore.deleteAllContainerPlacementMessages(message.getUuid());
           }
         }
-        Thread.sleep(DEFAULT_CLUSTER_MANAGER_CONTAINER_PLACEMENT_HANDLER_SLEEP_MS);
+        Thread.sleep(containerPlacementHandlerSleepMs);
       } catch (InterruptedException e) {
         LOG.warn("Got InterruptedException in ContainerPlacementRequestAllocator thread.", e);
         Thread.currentThread().interrupt();
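
A hypothetical test-side use of the new constructor, shortening the poll interval so a test does not wait the default handler sleep between iterations. The three collaborators are assumed to exist in the test, and the allocator is assumed to be driven by a dedicated thread, as the InterruptedException handling in its loop suggests.

// Sketch only: 100 ms replaces DEFAULT_CLUSTER_MANAGER_CONTAINER_PLACEMENT_HANDLER_SLEEP_MS.
ContainerPlacementRequestAllocator requestAllocator = new ContainerPlacementRequestAllocator(
    containerPlacementMetadataStore, containerProcessManager, appConfig, 100);
Thread allocatorThread = new Thread(requestAllocator, "Container Placement Request Allocator Thread");
allocatorThread.start();
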
diff --git a/samza-core/src/main/java/org/apache/samza/config/JobConfig.java b/samza-core/src/main/java/org/apache/samza/config/JobConfig.java
index dff2991..0b274e9 100644
--- a/samza-core/src/main/java/org/apache/samza/config/JobConfig.java
+++ b/samza-core/src/main/java/org/apache/samza/config/JobConfig.java
@@ -143,8 +143,7 @@
   public static final String COORDINATOR_STREAM_FACTORY = "job.coordinatorstream.config.factory";
   public static final String DEFAULT_COORDINATOR_STREAM_CONFIG_FACTORY = "org.apache.samza.util.DefaultCoordinatorStreamConfigFactory";
 
-  public static final String CLUSTER_BASED_JOB_COORDINATOR_DEPENDENCY_ISOLATION_ENABLED =
-      "samza.cluster.based.job.coordinator.dependency.isolation.enabled";
+  public static final String JOB_SPLIT_DEPLOYMENT_ENABLED = "job.split.deployment.enabled";
 
   private static final String JOB_STARTPOINT_ENABLED = "job.startpoint.enabled";
 
@@ -221,20 +220,20 @@
   public Map<String, Pattern> getMonitorRegexPatternMap(String rewritersList) {
     Map<String, Pattern> inputRegexesToMonitor = new HashMap<>();
     Stream.of(rewritersList.split(",")).forEach(rewriterName -> {
-        Optional<String> rewriterSystem = getRegexResolvedSystem(rewriterName);
-        Optional<String> rewriterRegex = getRegexResolvedStreams(rewriterName);
-        if (rewriterSystem.isPresent() && rewriterRegex.isPresent()) {
-          Pattern newPatternForSystem;
-          Pattern existingPatternForSystem = inputRegexesToMonitor.get(rewriterSystem.get());
-          if (existingPatternForSystem == null) {
-            newPatternForSystem = Pattern.compile(rewriterRegex.get());
-          } else {
-            newPatternForSystem =
-                Pattern.compile(String.join("|", existingPatternForSystem.pattern(), rewriterRegex.get()));
-          }
-          inputRegexesToMonitor.put(rewriterSystem.get(), newPatternForSystem);
+      Optional<String> rewriterSystem = getRegexResolvedSystem(rewriterName);
+      Optional<String> rewriterRegex = getRegexResolvedStreams(rewriterName);
+      if (rewriterSystem.isPresent() && rewriterRegex.isPresent()) {
+        Pattern newPatternForSystem;
+        Pattern existingPatternForSystem = inputRegexesToMonitor.get(rewriterSystem.get());
+        if (existingPatternForSystem == null) {
+          newPatternForSystem = Pattern.compile(rewriterRegex.get());
+        } else {
+          newPatternForSystem =
+              Pattern.compile(String.join("|", existingPatternForSystem.pattern(), rewriterRegex.get()));
         }
-      });
+        inputRegexesToMonitor.put(rewriterSystem.get(), newPatternForSystem);
+      }
+    });
     return inputRegexesToMonitor;
   }
 
@@ -294,13 +293,13 @@
   public String getSSPMatcherConfigRegex() {
     return Optional.ofNullable(get(SSP_MATCHER_CONFIG_REGEX))
         .orElseThrow(
-            () -> new SamzaException(String.format("Missing required configuration: '%s'", SSP_MATCHER_CONFIG_REGEX)));
+          () -> new SamzaException(String.format("Missing required configuration: '%s'", SSP_MATCHER_CONFIG_REGEX)));
   }
 
   public String getSSPMatcherConfigRanges() {
     return Optional.ofNullable(get(SSP_MATCHER_CONFIG_RANGES))
         .orElseThrow(
-            () -> new SamzaException(String.format("Missing required configuration: '%s'", SSP_MATCHER_CONFIG_RANGES)));
+          () -> new SamzaException(String.format("Missing required configuration: '%s'", SSP_MATCHER_CONFIG_RANGES)));
   }
 
   public String getSSPMatcherConfigJobFactoryRegex() {
@@ -352,7 +351,7 @@
    * @param configParam the config param to determine
    * @return true if the config is related to autosizing, false otherwise
    */
-  public boolean isAutosizingConfig(String configParam) {
+  public static boolean isAutosizingConfig(String configParam) {
     return configParam.startsWith(JOB_AUTOSIZING_CONFIG_PREFIX);
   }
 
@@ -372,8 +371,8 @@
     return getStandbyTaskReplicationFactor() > 1;
   }
 
-  public boolean getClusterBasedJobCoordinatorDependencyIsolationEnabled() {
-    return getBoolean(CLUSTER_BASED_JOB_COORDINATOR_DEPENDENCY_ISOLATION_ENABLED, false);
+  public boolean isSplitDeploymentEnabled() {
+    return getBoolean(JOB_SPLIT_DEPLOYMENT_ENABLED, false);
   }
 
   /**
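
For reference, a minimal sketch of reading the renamed flag through JobConfig; the map literal is an illustrative assumption, and the flag still defaults to false when absent.

import java.util.Collections;
import org.apache.samza.config.JobConfig;
import org.apache.samza.config.MapConfig;

public class SplitDeploymentFlagExample {
  public static void main(String[] args) {
    JobConfig jobConfig = new JobConfig(
        new MapConfig(Collections.singletonMap(JobConfig.JOB_SPLIT_DEPLOYMENT_ENABLED, "true")));
    // prints "true"; with an empty config this would print "false"
    System.out.println(jobConfig.isSplitDeploymentEnabled());
  }
}
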
diff --git a/samza-core/src/main/java/org/apache/samza/config/ShellCommandConfig.java b/samza-core/src/main/java/org/apache/samza/config/ShellCommandConfig.java
index 668c40a..73093a8 100644
--- a/samza-core/src/main/java/org/apache/samza/config/ShellCommandConfig.java
+++ b/samza-core/src/main/java/org/apache/samza/config/ShellCommandConfig.java
@@ -58,26 +58,25 @@
   public static final String ENV_EXECUTION_ENV_CONTAINER_ID = "EXECUTION_ENV_CONTAINER_ID";
 
   /**
-   * Set to "true" if cluster-based job coordinator dependency isolation is enabled. Otherwise, will be considered
-   * false.
+   * Set to "true" if split deployment feature is enabled. Otherwise, will be considered false.
    *
-   * The launch process for the cluster-based job coordinator depends on the value of this, since it needs to be known
-   * if the cluster-based job coordinator should be launched in an isolated mode. This needs to be an environment
-   * variable, because the value needs to be known before the full configs can be read from the metadata store (full
-   * configs are only read after launch is complete).
+   * The launch process for the cluster-based job coordinator and job container depends on the value of this, since it
+   * needs to be known if the cluster-based job coordinator and job container should be launched in a split deployment
+   * mode.
+   * This needs to be an environment variable, because the value needs to be known before the full configs can be read
+   * from the metadata store (full configs are only read after launch is complete).
    */
-  public static final String ENV_CLUSTER_BASED_JOB_COORDINATOR_DEPENDENCY_ISOLATION_ENABLED =
-      "CLUSTER_BASED_JOB_COORDINATOR_DEPENDENCY_ISOLATION_ENABLED";
+  public static final String ENV_SPLIT_DEPLOYMENT_ENABLED = "ENV_SPLIT_DEPLOYMENT_ENABLED";
 
   /**
-   * When running the cluster-based job coordinator in an isolated mode, it uses JARs and resources from a lib directory
-   * which is provided by the framework. In some cases, it is necessary to use some resources specified by the
-   * application as well. This environment variable can be set to a directory which is different from the framework lib
-   * directory in order to tell Samza where application resources live.
-   * This is an environment variable because it is needed in order to launch the cluster-based job coordinator Java
-   * process, which means access to full configs is not available yet.
+   * When running the cluster-based job coordinator and job container in a split deployment mode, it uses JARs and
+   * resources from a lib directory which is provided by the framework. In some cases, it is necessary to use some
+   * resources specified by the application as well. This environment variable can be set to a directory which is
+   * different from the framework lib directory in order to tell Samza where application resources live.
+   * This is an environment variable because it is needed in order to launch the cluster-based job coordinator and job
+   * container Java processes, which means access to full configs is not available yet.
    * For example, this is used to set a system property for the location of an application-specified log4j configuration
-   * file when launching the cluster-based job coordinator Java process.
+   * file when launching the cluster-based job coordinator and job container Java processes.
    */
   public static final String ENV_APPLICATION_LIB_DIR = "APPLICATION_LIB_DIR";
 
diff --git a/samza-core/src/main/java/org/apache/samza/config/StreamConfig.java b/samza-core/src/main/java/org/apache/samza/config/StreamConfig.java
index 8ee044e..950cf10 100644
--- a/samza-core/src/main/java/org/apache/samza/config/StreamConfig.java
+++ b/samza-core/src/main/java/org/apache/samza/config/StreamConfig.java
@@ -282,9 +282,9 @@
     Set<SystemStream> legacySystemStreams = subConf.keySet().stream()
       .filter(k -> k.endsWith(MSG_SERDE) || k.endsWith(KEY_SERDE))
       .map(k -> {
-          String streamName = k.substring(0, k.length() - 16 /* .samza.XXX.serde length */);
-          return new SystemStream(systemName, streamName);
-        })
+        String streamName = k.substring(0, k.length() - 16 /* .samza.XXX.serde length */);
+        return new SystemStream(systemName, streamName);
+      })
       .collect(Collectors.toSet());
 
     Set<SystemStream> systemStreams = subset(STREAMS_PREFIX).keySet().stream()
diff --git a/samza-core/src/main/java/org/apache/samza/config/SystemConfig.java b/samza-core/src/main/java/org/apache/samza/config/SystemConfig.java
index 7b44a3a..a2da35d 100644
--- a/samza-core/src/main/java/org/apache/samza/config/SystemConfig.java
+++ b/samza-core/src/main/java/org/apache/samza/config/SystemConfig.java
@@ -92,8 +92,8 @@
     return getSystemFactories().entrySet()
         .stream()
         .collect(Collectors.toMap(Entry::getKey,
-            systemNameToFactoryEntry -> systemNameToFactoryEntry.getValue()
-                .getAdmin(systemNameToFactoryEntry.getKey(), this)));
+          systemNameToFactoryEntry -> systemNameToFactoryEntry.getValue()
+              .getAdmin(systemNameToFactoryEntry.getKey(), this)));
   }
 
   /**
diff --git a/samza-core/src/main/java/org/apache/samza/config/TaskConfig.java b/samza-core/src/main/java/org/apache/samza/config/TaskConfig.java
index f5f09b2..461b647 100644
--- a/samza-core/src/main/java/org/apache/samza/config/TaskConfig.java
+++ b/samza-core/src/main/java/org/apache/samza/config/TaskConfig.java
@@ -111,7 +111,7 @@
   public static final String TRANSACTIONAL_STATE_CHECKPOINT_ENABLED = "task.transactional.state.checkpoint.enabled";
   private static final boolean DEFAULT_TRANSACTIONAL_STATE_CHECKPOINT_ENABLED = true;
   public static final String TRANSACTIONAL_STATE_RESTORE_ENABLED = "task.transactional.state.restore.enabled";
-  private static final boolean DEFAULT_TRANSACTIONAL_STATE_RESTORE_ENABLED = false;
+  private static final boolean DEFAULT_TRANSACTIONAL_STATE_RESTORE_ENABLED = true;
   public static final String TRANSACTIONAL_STATE_RETAIN_EXISTING_STATE =
       "task.transactional.state.retain.existing.state";
   private static final boolean DEFAULT_TRANSACTIONAL_STATE_RETAIN_EXISTING_STATE = true;
diff --git a/samza-core/src/main/java/org/apache/samza/container/ContainerHeartbeatMonitor.java b/samza-core/src/main/java/org/apache/samza/container/ContainerHeartbeatMonitor.java
index 1a131c3..89b5fc9 100644
--- a/samza-core/src/main/java/org/apache/samza/container/ContainerHeartbeatMonitor.java
+++ b/samza-core/src/main/java/org/apache/samza/container/ContainerHeartbeatMonitor.java
@@ -63,17 +63,17 @@
     }
     LOG.info("Starting ContainerHeartbeatMonitor");
     scheduler.scheduleAtFixedRate(() -> {
-        ContainerHeartbeatResponse response = containerHeartbeatClient.requestHeartbeat();
-        if (!response.isAlive()) {
-          scheduler.schedule(() -> {
-              // On timeout of container shutting down, force exit.
-              LOG.error("Graceful shutdown timeout expired. Force exiting.");
-              ThreadUtil.logThreadDump("Thread dump at heartbeat monitor shutdown timeout.");
-              System.exit(1);
-            }, SHUTDOWN_TIMOUT_MS, TimeUnit.MILLISECONDS);
-          onContainerExpired.run();
-        }
-      }, 0, SCHEDULE_MS, TimeUnit.MILLISECONDS);
+      ContainerHeartbeatResponse response = containerHeartbeatClient.requestHeartbeat();
+      if (!response.isAlive()) {
+        scheduler.schedule(() -> {
+          // On timeout of container shutting down, force exit.
+          LOG.error("Graceful shutdown timeout expired. Force exiting.");
+          ThreadUtil.logThreadDump("Thread dump at heartbeat monitor shutdown timeout.");
+          System.exit(1);
+        }, SHUTDOWN_TIMOUT_MS, TimeUnit.MILLISECONDS);
+        onContainerExpired.run();
+      }
+    }, 0, SCHEDULE_MS, TimeUnit.MILLISECONDS);
     started = true;
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/container/LocalityManager.java b/samza-core/src/main/java/org/apache/samza/container/LocalityManager.java
index 864b558..34baad0 100644
--- a/samza-core/src/main/java/org/apache/samza/container/LocalityManager.java
+++ b/samza-core/src/main/java/org/apache/samza/container/LocalityManager.java
@@ -61,13 +61,13 @@
   public Map<String, Map<String, String>> readContainerLocality() {
     Map<String, Map<String, String>> allMappings = new HashMap<>();
     metadataStore.all().forEach((containerId, valueBytes) -> {
-        if (valueBytes != null) {
-          String locationId = valueSerde.fromBytes(valueBytes);
-          Map<String, String> values = new HashMap<>();
-          values.put(SetContainerHostMapping.HOST_KEY, locationId);
-          allMappings.put(containerId, values);
-        }
-      });
+      if (valueBytes != null) {
+        String locationId = valueSerde.fromBytes(valueBytes);
+        Map<String, String> values = new HashMap<>();
+        values.put(SetContainerHostMapping.HOST_KEY, locationId);
+        allMappings.put(containerId, values);
+      }
+    });
     if (LOG.isDebugEnabled()) {
       for (Map.Entry<String, Map<String, String>> entry : allMappings.entrySet()) {
         LOG.debug(String.format("Locality for container %s: %s", entry.getKey(), entry.getValue()));
diff --git a/samza-core/src/main/java/org/apache/samza/container/RunLoop.java b/samza-core/src/main/java/org/apache/samza/container/RunLoop.java
index a509a27..f0968fc 100644
--- a/samza-core/src/main/java/org/apache/samza/container/RunLoop.java
+++ b/samza-core/src/main/java/org/apache/samza/container/RunLoop.java
@@ -23,7 +23,6 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -51,7 +50,6 @@
 import org.apache.samza.util.ThrottlingScheduler;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import scala.collection.JavaConverters;
 
 
 /**
@@ -88,7 +86,7 @@
   private final boolean isAsyncCommitEnabled;
   private volatile boolean runLoopResumedSinceLastChecked;
 
-  public RunLoop(Map<TaskName, TaskInstance> taskInstances,
+  public RunLoop(Map<TaskName, RunLoopTask> runLoopTasks,
       ExecutorService threadPool,
       SystemConsumers consumerMultiplexer,
       int maxConcurrency,
@@ -111,16 +109,16 @@
     this.maxIdleMs = maxIdleMs;
     this.callbackTimer = (callbackTimeoutMs > 0) ? Executors.newSingleThreadScheduledExecutor() : null;
     this.callbackExecutor = new ThrottlingScheduler(maxThrottlingDelayMs);
-    this.coordinatorRequests = new CoordinatorRequests(taskInstances.keySet());
+    this.coordinatorRequests = new CoordinatorRequests(runLoopTasks.keySet());
     this.latch = new Object();
     this.workerTimer = Executors.newSingleThreadScheduledExecutor();
     this.clock = clock;
     Map<TaskName, AsyncTaskWorker> workers = new HashMap<>();
-    for (TaskInstance task : taskInstances.values()) {
+    for (RunLoopTask task : runLoopTasks.values()) {
       workers.put(task.taskName(), new AsyncTaskWorker(task));
     }
     // Partions and tasks assigned to the container will not change during the run loop life time
-    this.sspToTaskWorkerMapping = Collections.unmodifiableMap(getSspToAsyncTaskWorkerMap(taskInstances, workers));
+    this.sspToTaskWorkerMapping = Collections.unmodifiableMap(getSspToAsyncTaskWorkerMap(runLoopTasks, workers));
     this.taskWorkers = Collections.unmodifiableList(new ArrayList<>(workers.values()));
     this.isAsyncCommitEnabled = isAsyncCommitEnabled;
   }
@@ -129,10 +127,10 @@
    * Returns mapping of the SystemStreamPartition to the AsyncTaskWorkers to efficiently route the envelopes
    */
   private static Map<SystemStreamPartition, List<AsyncTaskWorker>> getSspToAsyncTaskWorkerMap(
-      Map<TaskName, TaskInstance> taskInstances, Map<TaskName, AsyncTaskWorker> taskWorkers) {
+      Map<TaskName, RunLoopTask> runLoopTasks, Map<TaskName, AsyncTaskWorker> taskWorkers) {
     Map<SystemStreamPartition, List<AsyncTaskWorker>> sspToWorkerMap = new HashMap<>();
-    for (TaskInstance task : taskInstances.values()) {
-      Set<SystemStreamPartition> ssps = JavaConverters.setAsJavaSetConverter(task.systemStreamPartitions()).asJava();
+    for (RunLoopTask task : runLoopTasks.values()) {
+      Set<SystemStreamPartition> ssps = task.systemStreamPartitions();
       for (SystemStreamPartition ssp : ssps) {
         sspToWorkerMap.putIfAbsent(ssp, new ArrayList<>());
         sspToWorkerMap.get(ssp).add(taskWorkers.get(task.taskName()));
@@ -155,12 +153,7 @@
 
       long prevNs = clock.nanoTime();
 
-      while (!shutdownNow) {
-        if (throwable != null) {
-          log.error("Caught throwable and stopping run loop", throwable);
-          throw new SamzaException(throwable);
-        }
-
+      while (!shutdownNow && throwable == null) {
         long startNs = clock.nanoTime();
 
         IncomingMessageEnvelope envelope = chooseEnvelope();
@@ -185,6 +178,17 @@
           containerMetrics.utilization().set(((double) activeNs) / totalNs);
         }
       }
+
+      /*
+       * The current semantics of an external shutdown request (RunLoop.shutdown()) are loosely defined: the run loop
+       * does not wait for in-flight messages to complete and triggers shutdown as soon as it notices the request.
+       * Hence, the exception may or may not be propagated, depending on the order of execution between the process
+       * callback and the run loop thread.
+       */
+      if (throwable != null) {
+        log.error("Caught throwable and stopping run loop", throwable);
+        throw new SamzaException(throwable);
+      }
     } finally {
       workerTimer.shutdown();
       callbackExecutor.shutdown();
@@ -355,15 +359,15 @@
    * will run the task asynchronously. It runs window and commit in the provided thread pool.
    */
   private class AsyncTaskWorker implements TaskCallbackListener {
-    private final TaskInstance task;
+    private final RunLoopTask task;
     private final TaskCallbackManager callbackManager;
     private volatile AsyncTaskState state;
 
-    AsyncTaskWorker(TaskInstance task) {
+    AsyncTaskWorker(RunLoopTask task) {
       this.task = task;
       this.callbackManager = new TaskCallbackManager(this, callbackTimer, callbackTimeoutMs, maxConcurrency, clock);
       Set<SystemStreamPartition> sspSet = getWorkingSSPSet(task);
-      this.state = new AsyncTaskState(task.taskName(), task.metrics(), sspSet, task.intermediateStreams().nonEmpty());
+      this.state = new AsyncTaskState(task.taskName(), task.metrics(), sspSet, !task.intermediateStreams().isEmpty());
     }
 
     private void init() {
@@ -393,8 +397,8 @@
       final EpochTimeScheduler epochTimeScheduler = task.epochTimeScheduler();
       if (epochTimeScheduler != null) {
         epochTimeScheduler.registerListener(() -> {
-            state.needScheduler();
-          });
+          state.needScheduler();
+        });
       }
     }
 
@@ -403,9 +407,9 @@
      * @param task
      * @return a Set of SSPs such that all SSPs are not at end of stream.
      */
-    private Set<SystemStreamPartition> getWorkingSSPSet(TaskInstance task) {
+    private Set<SystemStreamPartition> getWorkingSSPSet(RunLoopTask task) {
 
-      Set<SystemStreamPartition> allPartitions = new HashSet<>(JavaConverters.setAsJavaSetConverter(task.systemStreamPartitions()).asJava());
+      Set<SystemStreamPartition> allPartitions = task.systemStreamPartitions();
 
       // filter only those SSPs that are not at end of stream.
       Set<SystemStreamPartition> workingSSPSet = allPartitions.stream()
@@ -625,7 +629,9 @@
               log.trace("Update offset for ssp {}, offset {}", envelope.getSystemStreamPartition(), envelope.getOffset());
 
               // update offset
-              task.offsetManager().update(task.taskName(), envelope.getSystemStreamPartition(), envelope.getOffset());
+              if (task.offsetManager() != null) {
+                task.offsetManager().update(task.taskName(), envelope.getSystemStreamPartition(), envelope.getOffset());
+              }
 
               // update coordinator
               coordinatorRequests.update(callbackToUpdate.getCoordinator());
@@ -648,8 +654,10 @@
     @Override
     public void onFailure(TaskCallback callback, Throwable t) {
       try {
-        state.doneProcess();
+        // Record the exception before marking the message as processed so that it is promptly visible to the
+        // run loop thread. Refer to SAMZA-2510 for more details.
         abort(t);
+        state.doneProcess();
         // update pending count, but not offset
         TaskCallbackImpl callbackImpl = (TaskCallbackImpl) callback;
         log.error("Got callback failure for task {}", callbackImpl.getTaskName(), t);
diff --git a/samza-core/src/main/java/org/apache/samza/container/RunLoopFactory.java b/samza-core/src/main/java/org/apache/samza/container/RunLoopFactory.java
index 0e0a01c..e2069f4 100644
--- a/samza-core/src/main/java/org/apache/samza/container/RunLoopFactory.java
+++ b/samza-core/src/main/java/org/apache/samza/container/RunLoopFactory.java
@@ -19,14 +19,12 @@
 
 package org.apache.samza.container;
 
-import org.apache.samza.SamzaException;
 import org.apache.samza.config.TaskConfig;
 import org.apache.samza.system.SystemConsumers;
 import org.apache.samza.util.HighResolutionClock;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.collection.JavaConverters;
-import scala.runtime.AbstractFunction1;
 import java.util.concurrent.ExecutorService;
 
 /**
@@ -36,7 +34,7 @@
 public class RunLoopFactory {
   private static final Logger log = LoggerFactory.getLogger(RunLoopFactory.class);
 
-  public static Runnable createRunLoop(scala.collection.immutable.Map<TaskName, TaskInstance> taskInstances,
+  public static Runnable createRunLoop(scala.collection.immutable.Map<TaskName, RunLoopTask> taskInstances,
       SystemConsumers consumerMultiplexer,
       ExecutorService threadPool,
       long maxThrottlingDelayMs,
@@ -52,18 +50,6 @@
 
     log.info("Got commit milliseconds: {}.", taskCommitMs);
 
-    int asyncTaskCount = taskInstances.values().count(new AbstractFunction1<TaskInstance, Object>() {
-      @Override
-      public Boolean apply(TaskInstance t) {
-        return t.isAsyncTask();
-      }
-    });
-
-    // asyncTaskCount should be either 0 or the number of all taskInstances
-    if (asyncTaskCount > 0 && asyncTaskCount < taskInstances.size()) {
-      throw new SamzaException("Mixing StreamTask and AsyncStreamTask is not supported");
-    }
-
     int taskMaxConcurrency = taskConfig.getMaxConcurrency();
     log.info("Got taskMaxConcurrency: {}.", taskMaxConcurrency);
 
diff --git a/samza-core/src/main/java/org/apache/samza/container/RunLoopTask.java b/samza-core/src/main/java/org/apache/samza/container/RunLoopTask.java
new file mode 100644
index 0000000..551da88
--- /dev/null
+++ b/samza-core/src/main/java/org/apache/samza/container/RunLoopTask.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.samza.container;
+
+import java.util.Set;
+import org.apache.samza.checkpoint.OffsetManager;
+import org.apache.samza.scheduler.EpochTimeScheduler;
+import org.apache.samza.system.IncomingMessageEnvelope;
+import org.apache.samza.system.SystemStreamPartition;
+import org.apache.samza.task.ReadableCoordinator;
+import org.apache.samza.task.TaskCallbackFactory;
+
+
+/**
+ * The interface required for a task's execution to be managed within {@link RunLoop}.
+ *
+ * Some notes on thread safety and exclusivity between methods:
+ *
+ * RunLoop will ensure exclusivity between {@link #window}, {@link #commit}, {@link #scheduler}, and
+ * {@link #endOfStream}.
+ *
+ * There is an exception for {@link #process}, which may execute concurrently with {@link #commit} IF the encapsulating
+ * {@link RunLoop} has isAsyncCommitEnabled set to true (TODO SAMZA-2531: isAsyncCommitEnabled is either an incomplete
+ * feature or misnamed). In this case, the implementer of this interface should take care to ensure that any objects
+ * shared between commit and process are thread safe.
+ *
+ * Be aware that {@link #commit}, {@link #window} and {@link #scheduler} can be run in their own thread pool outside
+ * the main RunLoop thread (which executes {@link #process}), so they may run concurrently between tasks. For example,
+ * one task may be executing a commit while another is executing window. For this reason, implementers of this
+ * interface must ensure that objects shared between instances of RunLoopTask are thread safe.
+ */
+public interface RunLoopTask {
+
+  /**
+   * The {@link TaskName} associated with this RunLoopTask.
+   *
+   * @return taskName
+   */
+  TaskName taskName();
+
+  /**
+   * Process an incoming message envelope.
+   *
+   * @param envelope The envelope to be processed
+   * @param coordinator Manages execution of tasks
+   * @param callbackFactory Creates a callback to be used to indicate completion of or failure to process the
+   *                        envelope. {@link TaskCallbackFactory#createCallback()} should be called before processing
+   *                        begins.
+   */
+  void process(IncomingMessageEnvelope envelope, ReadableCoordinator coordinator, TaskCallbackFactory callbackFactory);
+
+  /**
+   * Performs a window for this task. If {@link #isWindowableTask()} is true, this method will be invoked periodically
+   * by {@link RunLoop} according to its windowMs.
+   *
+   * This method can be used to perform aggregations within a task.
+   *
+   * @param coordinator Manages execution of tasks
+   */
+  void window(ReadableCoordinator coordinator);
+
+  /**
+   * Used in conjunction with {@link #epochTimeScheduler()} to execute scheduled callbacks. See documentation of
+   * {@link EpochTimeScheduler} for more information.
+   *
+   * @param coordinator Manages execution of tasks.
+   */
+  void scheduler(ReadableCoordinator coordinator);
+
+  /**
+   * Performs a commit for this task. Operations for persisting checkpoint-related information for this task should
+   * be done here.
+   */
+  void commit();
+
+  /**
+   * Called when all {@link SystemStreamPartition} processed by a task have reached end of stream. This is called only
+   * once per task. {@link RunLoop} will issue a shutdown request to the coordinator immediately following the
+   * invocation of this method.
+   *
+   * @param coordinator manages execution of tasks.
+   */
+  void endOfStream(ReadableCoordinator coordinator);
+
+  /**
+   * Indicates whether {@link #window} should be invoked on this task. If true, {@link RunLoop}
+   * will schedule window to execute periodically according to its windowMs.
+   *
+   * @return whether the task should perform window
+   */
+  boolean isWindowableTask();
+
+  /**
+   * The intermediate streams, if any, used by this task. Intermediate streams may be used to facilitate task
+   * processing before terminal output is produced. {@link RunLoop} uses this information to determine when the task
+   * has reached end of stream.
+   *
+   * @return the set of intermediate streams used by the task; empty if there are none
+   */
+  Set<String> intermediateStreams();
+
+  /**
+   * The set of {@link SystemStreamPartition} this task consumes from.
+   *
+   * @return the set of SSPs
+   */
+  Set<SystemStreamPartition> systemStreamPartitions();
+
+  /**
+   * An {@link OffsetManager}, if any, to use to track offsets for each input SSP. Offsets will be updated after successful
+   * completion of an envelope from an SSP.
+   *
+   * @return the offset manager, or null if offsets are not tracked for this task
+   */
+  OffsetManager offsetManager();
+
+  /**
+   * The metrics instance {@link RunLoop} will use to emit metrics related to execution of this task.
+   *
+   * @return metrics instance for this task
+   */
+  TaskInstanceMetrics metrics();
+
+  /**
+   * An {@link EpochTimeScheduler}, if any, used by the task to handle timer based callbacks.
+   *
+   * @return the scheduler, or null if the task does not use timer-based callbacks
+   */
+  EpochTimeScheduler epochTimeScheduler();
+}
\ No newline at end of file
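
Reviewer note: a hypothetical skeleton implementation (not part of this change; package and class names are invented) may make the contract above concrete. The trivial bodies assume a task with no windowing, timers, offset tracking or intermediate streams; the key point is the process() contract of creating the callback before any work, then completing it on success or failing it with the cause.

    package org.example;  // hypothetical; not part of the Samza tree

    import java.util.Collections;
    import java.util.Set;
    import org.apache.samza.checkpoint.OffsetManager;
    import org.apache.samza.container.RunLoopTask;
    import org.apache.samza.container.TaskInstanceMetrics;
    import org.apache.samza.container.TaskName;
    import org.apache.samza.scheduler.EpochTimeScheduler;
    import org.apache.samza.system.IncomingMessageEnvelope;
    import org.apache.samza.system.SystemStreamPartition;
    import org.apache.samza.task.ReadableCoordinator;
    import org.apache.samza.task.TaskCallback;
    import org.apache.samza.task.TaskCallbackFactory;

    // Hypothetical skeleton: logs each envelope and otherwise does nothing.
    public class LoggingRunLoopTask implements RunLoopTask {
      private final TaskName taskName;
      private final Set<SystemStreamPartition> ssps;
      private final TaskInstanceMetrics metrics;

      public LoggingRunLoopTask(TaskName taskName, Set<SystemStreamPartition> ssps,
          TaskInstanceMetrics metrics) {
        this.taskName = taskName;
        this.ssps = ssps;
        this.metrics = metrics;
      }

      @Override
      public TaskName taskName() { return taskName; }

      @Override
      public void process(IncomingMessageEnvelope envelope, ReadableCoordinator coordinator,
          TaskCallbackFactory callbackFactory) {
        // Create the callback before processing begins, per the interface contract.
        TaskCallback callback = callbackFactory.createCallback();
        try {
          System.out.println(taskName + ": got message from " + envelope.getSystemStreamPartition());
          callback.complete();
        } catch (Exception e) {
          callback.failure(e);
        }
      }

      @Override
      public void window(ReadableCoordinator coordinator) { }

      @Override
      public void scheduler(ReadableCoordinator coordinator) { }

      @Override
      public void commit() { }

      @Override
      public void endOfStream(ReadableCoordinator coordinator) { }

      @Override
      public boolean isWindowableTask() { return false; }

      @Override
      public Set<String> intermediateStreams() { return Collections.emptySet(); }

      @Override
      public Set<SystemStreamPartition> systemStreamPartitions() { return ssps; }

      @Override
      public OffsetManager offsetManager() { return null; }  // no offset tracking in this sketch

      @Override
      public TaskInstanceMetrics metrics() { return metrics; }

      @Override
      public EpochTimeScheduler epochTimeScheduler() { return null; }  // no timer callbacks
    }
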
diff --git a/samza-core/src/main/java/org/apache/samza/container/grouper/stream/AllSspToSingleTaskGrouperFactory.java b/samza-core/src/main/java/org/apache/samza/container/grouper/stream/AllSspToSingleTaskGrouperFactory.java
index dc9b2f4..a8aaecc 100644
--- a/samza-core/src/main/java/org/apache/samza/container/grouper/stream/AllSspToSingleTaskGrouperFactory.java
+++ b/samza-core/src/main/java/org/apache/samza/container/grouper/stream/AllSspToSingleTaskGrouperFactory.java
@@ -61,10 +61,10 @@
     }
 
     processorList.forEach(processor -> {
-        // Create a task name for each processor and assign all partitions to each task name.
-        final TaskName taskName = new TaskName(String.format("Task-%s", processor));
-        groupedMap.put(taskName, ssps);
-      });
+      // Create a task name for each processor and assign all partitions to each task name.
+      final TaskName taskName = new TaskName(String.format("Task-%s", processor));
+      groupedMap.put(taskName, ssps);
+    });
 
     return groupedMap;
   }
diff --git a/samza-core/src/main/java/org/apache/samza/container/grouper/stream/SSPGrouperProxy.java b/samza-core/src/main/java/org/apache/samza/container/grouper/stream/SSPGrouperProxy.java
index 6507046..808f488 100644
--- a/samza-core/src/main/java/org/apache/samza/container/grouper/stream/SSPGrouperProxy.java
+++ b/samza-core/src/main/java/org/apache/samza/container/grouper/stream/SSPGrouperProxy.java
@@ -166,11 +166,11 @@
   private Map<SystemStream, Integer> getSystemStreamToPartitionCount(Map<TaskName, List<SystemStreamPartition>> taskToSSPAssignment) {
     Map<SystemStream, Integer> systemStreamToPartitionCount = new HashMap<>();
     taskToSSPAssignment.forEach((taskName, systemStreamPartitions) -> {
-        systemStreamPartitions.forEach(systemStreamPartition -> {
-            SystemStream systemStream = systemStreamPartition.getSystemStream();
-            systemStreamToPartitionCount.put(systemStream, systemStreamToPartitionCount.getOrDefault(systemStream, 0) + 1);
-          });
+      systemStreamPartitions.forEach(systemStreamPartition -> {
+        SystemStream systemStream = systemStreamPartition.getSystemStream();
+        systemStreamToPartitionCount.put(systemStream, systemStreamToPartitionCount.getOrDefault(systemStream, 0) + 1);
       });
+    });
 
     return systemStreamToPartitionCount;
   }
@@ -185,12 +185,12 @@
     Map<SystemStreamPartition, TaskName> sspToTaskMapping = new HashMap<>();
     Map<TaskName, List<SystemStreamPartition>> previousTaskToSSPAssignment = grouperMetadata.getPreviousTaskToSSPAssignment();
     previousTaskToSSPAssignment.forEach((taskName, systemStreamPartitions) -> {
-        systemStreamPartitions.forEach(systemStreamPartition -> {
-            if (!broadcastSystemStreamPartitions.contains(systemStreamPartition)) {
-              sspToTaskMapping.put(systemStreamPartition, taskName);
-            }
-          });
+      systemStreamPartitions.forEach(systemStreamPartition -> {
+        if (!broadcastSystemStreamPartitions.contains(systemStreamPartition)) {
+          sspToTaskMapping.put(systemStreamPartition, taskName);
+        }
       });
+    });
     return sspToTaskMapping;
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/container/grouper/task/GroupByContainerIds.java b/samza-core/src/main/java/org/apache/samza/container/grouper/task/GroupByContainerIds.java
index 7c11da4..aec7215 100644
--- a/samza-core/src/main/java/org/apache/samza/container/grouper/task/GroupByContainerIds.java
+++ b/samza-core/src/main/java/org/apache/samza/container/grouper/task/GroupByContainerIds.java
@@ -172,11 +172,11 @@
 
     // Generate the {@see LocationId} to processors mapping and processorId to {@see TaskGroup} mapping.
     processorLocality.forEach((processorId, locationId) -> {
-        List<String> processorIds = locationIdToProcessors.getOrDefault(locationId, new ArrayList<>());
-        processorIds.add(processorId);
-        locationIdToProcessors.put(locationId, processorIds);
-        processorIdToTaskGroup.put(processorId, new TaskGroup(processorId, new ArrayList<>()));
-      });
+      List<String> processorIds = locationIdToProcessors.getOrDefault(locationId, new ArrayList<>());
+      processorIds.add(processorId);
+      locationIdToProcessors.put(locationId, processorIds);
+      processorIdToTaskGroup.put(processorId, new TaskGroup(processorId, new ArrayList<>()));
+    });
 
     int numTasksPerProcessor = taskModels.size() / processorLocality.size();
     Set<TaskName> assignedTasks = new HashSet<>();
diff --git a/samza-core/src/main/java/org/apache/samza/container/grouper/task/TaskAssignmentManager.java b/samza-core/src/main/java/org/apache/samza/container/grouper/task/TaskAssignmentManager.java
index e9fcadb..0a22c2d 100644
--- a/samza-core/src/main/java/org/apache/samza/container/grouper/task/TaskAssignmentManager.java
+++ b/samza-core/src/main/java/org/apache/samza/container/grouper/task/TaskAssignmentManager.java
@@ -74,24 +74,24 @@
   public Map<String, String> readTaskAssignment() {
     taskNameToContainerId.clear();
     taskContainerMappingMetadataStore.all().forEach((taskName, valueBytes) -> {
-        String containerId = containerIdSerde.fromBytes(valueBytes);
-        if (containerId != null) {
-          taskNameToContainerId.put(taskName, containerId);
-        }
-        LOG.debug("Assignment for task {}: {}", taskName, containerId);
-      });
+      String containerId = containerIdSerde.fromBytes(valueBytes);
+      if (containerId != null) {
+        taskNameToContainerId.put(taskName, containerId);
+      }
+      LOG.debug("Assignment for task {}: {}", taskName, containerId);
+    });
     return Collections.unmodifiableMap(new HashMap<>(taskNameToContainerId));
   }
 
   public Map<TaskName, TaskMode> readTaskModes() {
     Map<TaskName, TaskMode> taskModeMap = new HashMap<>();
     taskModeMappingMetadataStore.all().forEach((taskName, valueBytes) -> {
-        String taskMode = taskModeSerde.fromBytes(valueBytes);
-        if (taskMode != null) {
-          taskModeMap.put(new TaskName(taskName), TaskMode.valueOf(taskMode));
-        }
-        LOG.debug("Task mode assignment for task {}: {}", taskName, taskMode);
-      });
+      String taskMode = taskModeSerde.fromBytes(valueBytes);
+      if (taskMode != null) {
+        taskModeMap.put(new TaskName(taskName), TaskMode.valueOf(taskMode));
+      }
+      LOG.debug("Task mode assignment for task {}: {}", taskName, taskMode);
+    });
     return Collections.unmodifiableMap(new HashMap<>(taskModeMap));
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/coordinator/metadatastore/NamespaceAwareCoordinatorStreamStore.java b/samza-core/src/main/java/org/apache/samza/coordinator/metadatastore/NamespaceAwareCoordinatorStreamStore.java
index f4cd527..f5b99d7 100644
--- a/samza-core/src/main/java/org/apache/samza/coordinator/metadatastore/NamespaceAwareCoordinatorStreamStore.java
+++ b/samza-core/src/main/java/org/apache/samza/coordinator/metadatastore/NamespaceAwareCoordinatorStreamStore.java
@@ -106,15 +106,15 @@
     Map<String, byte[]> bootstrappedMessages = new HashMap<>();
     Map<String, byte[]> coordinatorStreamMessages = metadataStore.all();
     coordinatorStreamMessages.forEach((coordinatorMessageKeyAsJson, value) -> {
-        CoordinatorMessageKey coordinatorMessageKey = CoordinatorStreamStore.deserializeCoordinatorMessageKeyFromJson(coordinatorMessageKeyAsJson);
-        if (Objects.equals(namespace, coordinatorMessageKey.getNamespace())) {
-          if (value != null) {
-            bootstrappedMessages.put(coordinatorMessageKey.getKey(), value);
-          } else {
-            bootstrappedMessages.remove(coordinatorMessageKey.getKey());
-          }
+      CoordinatorMessageKey coordinatorMessageKey = CoordinatorStreamStore.deserializeCoordinatorMessageKeyFromJson(coordinatorMessageKeyAsJson);
+      if (Objects.equals(namespace, coordinatorMessageKey.getNamespace())) {
+        if (value != null) {
+          bootstrappedMessages.put(coordinatorMessageKey.getKey(), value);
+        } else {
+          bootstrappedMessages.remove(coordinatorMessageKey.getKey());
         }
-      });
+      }
+    });
 
     return bootstrappedMessages;
   }
diff --git a/samza-core/src/main/java/org/apache/samza/coordinator/stream/CoordinatorStreamWriter.java b/samza-core/src/main/java/org/apache/samza/coordinator/stream/CoordinatorStreamWriter.java
index 3d837ac..da65981 100644
--- a/samza-core/src/main/java/org/apache/samza/coordinator/stream/CoordinatorStreamWriter.java
+++ b/samza-core/src/main/java/org/apache/samza/coordinator/stream/CoordinatorStreamWriter.java
@@ -103,11 +103,9 @@
    * Main function for using the CoordinatorStreamWriter. The main function starts a CoordinatorStreamWriter
    * and sends control messages.
    * To run the code use the following command:
-   * {path to samza deployment}/samza/bin/run-coordinator-stream-writer.sh  --config job.config.loader.factory={config--loader-factory} --config job.config.loader.properties{properties needed for config loader to load config} --type={type of the message} --key={[optional] key of the message} --value={[optional] value of the message}
+   * {path to samza deployment}/samza/bin/run-coordinator-stream-writer.sh  --config job.config.loader.factory={config-loader-factory} --config job.config.loader.properties{properties needed for config loader to load config} --type={type of the message} --key={[optional] key of the message} --value={[optional] value of the message}
    *
    * @param args input arguments for running the writer. These arguments are:
-   *             "config-factory" = The config file factory
-   *             "config-path" = The path to config file of a job
    *             "type" = type of the message being written
    *             "key" = [optional] key of the message being written
    *             "value" = [optional] value of the message being written
diff --git a/samza-core/src/main/java/org/apache/samza/execution/JobGraph.java b/samza-core/src/main/java/org/apache/samza/execution/JobGraph.java
index 0da1fd5..72b3cee 100644
--- a/samza-core/src/main/java/org/apache/samza/execution/JobGraph.java
+++ b/samza-core/src/main/java/org/apache/samza/execution/JobGraph.java
@@ -285,15 +285,15 @@
    */
   private void validateInputStreams() {
     inputStreams.forEach(edge -> {
-        if (!edge.getSourceNodes().isEmpty()) {
-          throw new IllegalArgumentException(
-              String.format("Source stream %s should not have producers.", edge.getName()));
-        }
-        if (edge.getTargetNodes().isEmpty()) {
-          throw new IllegalArgumentException(
-              String.format("Source stream %s should have consumers.", edge.getName()));
-        }
-      });
+      if (!edge.getSourceNodes().isEmpty()) {
+        throw new IllegalArgumentException(
+            String.format("Source stream %s should not have producers.", edge.getName()));
+      }
+      if (edge.getTargetNodes().isEmpty()) {
+        throw new IllegalArgumentException(
+            String.format("Source stream %s should have consumers.", edge.getName()));
+      }
+    });
   }
 
   /**
@@ -301,15 +301,15 @@
    */
   private void validateOutputStreams() {
     outputStreams.forEach(edge -> {
-        if (!edge.getTargetNodes().isEmpty()) {
-          throw new IllegalArgumentException(
-              String.format("Sink stream %s should not have consumers", edge.getName()));
-        }
-        if (edge.getSourceNodes().isEmpty()) {
-          throw new IllegalArgumentException(
-              String.format("Sink stream %s should have producers", edge.getName()));
-        }
-      });
+      if (!edge.getTargetNodes().isEmpty()) {
+        throw new IllegalArgumentException(
+            String.format("Sink stream %s should not have consumers", edge.getName()));
+      }
+      if (edge.getSourceNodes().isEmpty()) {
+        throw new IllegalArgumentException(
+            String.format("Sink stream %s should have producers", edge.getName()));
+      }
+    });
   }
 
   /**
@@ -322,11 +322,11 @@
     internalEdges.removeAll(outputStreams);
 
     internalEdges.forEach(edge -> {
-        if (edge.getSourceNodes().isEmpty() || edge.getTargetNodes().isEmpty()) {
-          throw new IllegalArgumentException(
-              String.format("Internal stream %s should have both producers and consumers", edge.getName()));
-        }
-      });
+      if (edge.getSourceNodes().isEmpty() || edge.getTargetNodes().isEmpty()) {
+        throw new IllegalArgumentException(
+            String.format("Internal stream %s should have both producers and consumers", edge.getName()));
+      }
+    });
   }
 
   /**
@@ -352,19 +352,19 @@
     Set<JobNode> visited = new HashSet<>();
 
     inputStreams.forEach(input -> {
-        List<JobNode> next = input.getTargetNodes();
-        queue.addAll(next);
-        visited.addAll(next);
-      });
+      List<JobNode> next = input.getTargetNodes();
+      queue.addAll(next);
+      visited.addAll(next);
+    });
 
     while (!queue.isEmpty()) {
       JobNode node = queue.poll();
       node.getOutEdges().values().stream().flatMap(edge -> edge.getTargetNodes().stream()).forEach(target -> {
-          if (!visited.contains(target)) {
-            visited.add(target);
-            queue.offer(target);
-          }
-        });
+        if (!visited.contains(target)) {
+          visited.add(target);
+          queue.offer(target);
+        }
+      });
     }
 
     return visited;
@@ -385,17 +385,17 @@
     Map<String, Long> indegree = new HashMap<>();
     Set<JobNode> visited = new HashSet<>();
     pnodes.forEach(node -> {
-        String nid = node.getJobNameAndId();
-        //only count the degrees of intermediate streams
-        long degree = node.getInEdges().values().stream().filter(e -> !inputStreams.contains(e)).count();
-        indegree.put(nid, degree);
+      String nid = node.getJobNameAndId();
+      //only count the degrees of intermediate streams
+      long degree = node.getInEdges().values().stream().filter(e -> !inputStreams.contains(e)).count();
+      indegree.put(nid, degree);
 
-        if (degree == 0L) {
-          // start from the nodes that has no intermediate input streams, so it only consumes from input streams
-          q.add(node);
-          visited.add(node);
-        }
-      });
+      if (degree == 0L) {
+        // start from the nodes that have no intermediate input streams, so they only consume from input streams
+        q.add(node);
+        visited.add(node);
+      }
+    });
 
     List<JobNode> sortedNodes = new ArrayList<>();
     Set<JobNode> reachable = new HashSet<>();
@@ -413,15 +413,15 @@
         JobNode node = q.poll();
         sortedNodes.add(node);
         node.getOutEdges().values().stream().flatMap(edge -> edge.getTargetNodes().stream()).forEach(n -> {
-            String nid = n.getJobNameAndId();
-            Long degree = indegree.get(nid) - 1;
-            indegree.put(nid, degree);
-            if (degree == 0L && !visited.contains(n)) {
-              q.add(n);
-              visited.add(n);
-            }
-            reachable.add(n);
-          });
+          String nid = n.getJobNameAndId();
+          Long degree = indegree.get(nid) - 1;
+          indegree.put(nid, degree);
+          if (degree == 0L && !visited.contains(n)) {
+            q.add(n);
+            visited.add(n);
+          }
+          reachable.add(n);
+        });
       }
 
       if (sortedNodes.size() < pnodes.size()) {
diff --git a/samza-core/src/main/java/org/apache/samza/execution/JobGraphJsonGenerator.java b/samza-core/src/main/java/org/apache/samza/execution/JobGraphJsonGenerator.java
index 4b11174..e42b530 100644
--- a/samza-core/src/main/java/org/apache/samza/execution/JobGraphJsonGenerator.java
+++ b/samza-core/src/main/java/org/apache/samza/execution/JobGraphJsonGenerator.java
@@ -19,6 +19,7 @@
 
 package org.apache.samza.execution;
 
+import com.google.common.annotations.VisibleForTesting;
 import java.io.ByteArrayOutputStream;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -35,6 +36,7 @@
 import org.apache.samza.operators.spec.OutputOperatorSpec;
 import org.apache.samza.operators.spec.OutputStreamImpl;
 import org.apache.samza.operators.spec.PartitionByOperatorSpec;
+import org.apache.samza.operators.spec.SendToTableOperatorSpec;
 import org.apache.samza.operators.spec.StreamTableJoinOperatorSpec;
 import org.apache.samza.table.descriptors.BaseTableDescriptor;
 import org.apache.samza.table.descriptors.TableDescriptor;
@@ -164,7 +166,8 @@
    * @param spec a {@link OperatorSpec} instance
    * @return map of the operator properties
    */
-  private Map<String, Object> operatorToMap(OperatorSpec spec) {
+  @VisibleForTesting
+  Map<String, Object> operatorToMap(OperatorSpec spec) {
     Map<String, Object> map = new HashMap<>();
     map.put("opCode", spec.getOpCode().name());
     map.put("opId", spec.getOpId());
@@ -186,8 +189,8 @@
       map.put("tableId", tableId);
     }
 
-    if (spec instanceof StreamTableJoinOperatorSpec) {
-      String tableId = ((StreamTableJoinOperatorSpec) spec).getTableId();
+    if (spec instanceof SendToTableOperatorSpec) {
+      String tableId = ((SendToTableOperatorSpec) spec).getTableId();
       map.put("tableId", tableId);
     }
 
@@ -222,19 +225,19 @@
     OperatorGraphJson opGraph = new OperatorGraphJson();
     opGraph.inputStreams = new ArrayList<>();
     jobNode.getInEdges().values().forEach(inStream -> {
-        StreamJson inputJson = new StreamJson();
-        opGraph.inputStreams.add(inputJson);
-        inputJson.streamId = inStream.getStreamSpec().getId();
-        inputJson.nextOperatorIds = jobNode.getNextOperatorIds(inputJson.streamId);
-        updateOperatorGraphJson(jobNode.getInputOperator(inputJson.streamId), opGraph);
-      });
+      StreamJson inputJson = new StreamJson();
+      opGraph.inputStreams.add(inputJson);
+      inputJson.streamId = inStream.getStreamSpec().getId();
+      inputJson.nextOperatorIds = jobNode.getNextOperatorIds(inputJson.streamId);
+      updateOperatorGraphJson(jobNode.getInputOperator(inputJson.streamId), opGraph);
+    });
 
     opGraph.outputStreams = new ArrayList<>();
     jobNode.getOutEdges().values().forEach(outStream -> {
-        StreamJson outputJson = new StreamJson();
-        outputJson.streamId = outStream.getStreamSpec().getId();
-        opGraph.outputStreams.add(outputJson);
-      });
+      StreamJson outputJson = new StreamJson();
+      outputJson.streamId = outStream.getStreamSpec().getId();
+      opGraph.outputStreams.add(outputJson);
+    });
     return opGraph;
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/execution/JobNode.java b/samza-core/src/main/java/org/apache/samza/execution/JobNode.java
index e4fbdba..28bdff1 100644
--- a/samza-core/src/main/java/org/apache/samza/execution/JobNode.java
+++ b/samza-core/src/main/java/org/apache/samza/execution/JobNode.java
@@ -172,11 +172,11 @@
   private void findReachableOperators(Collection<OperatorSpec> inputOperatorsInJobNode,
       Set<OperatorSpec> reachableOperators) {
     inputOperatorsInJobNode.forEach(op -> {
-        if (reachableOperators.contains(op)) {
-          return;
-        }
-        reachableOperators.add(op);
-        findReachableOperators(op.getRegisteredOperatorSpecs(), reachableOperators);
-      });
+      if (reachableOperators.contains(op)) {
+        return;
+      }
+      reachableOperators.add(op);
+      findReachableOperators(op.getRegisteredOperatorSpecs(), reachableOperators);
+    });
   }
 }
diff --git a/samza-core/src/main/java/org/apache/samza/execution/JobNodeConfigurationGenerator.java b/samza-core/src/main/java/org/apache/samza/execution/JobNodeConfigurationGenerator.java
index 4ae4886..d87530d 100644
--- a/samza-core/src/main/java/org/apache/samza/execution/JobNodeConfigurationGenerator.java
+++ b/samza-core/src/main/java/org/apache/samza/execution/JobNodeConfigurationGenerator.java
@@ -74,11 +74,11 @@
     Map<String, String> mergedConfig = new HashMap<>(generatedConfig);
 
     originalConfig.forEach((k, v) -> {
-        if (generatedConfig.containsKey(k) && !Objects.equals(generatedConfig.get(k), v)) {
-          LOG.info("Replacing generated config for key: {} value: {} with original config value: {}", k, generatedConfig.get(k), v);
-        }
-        mergedConfig.put(k, v);
-      });
+      if (generatedConfig.containsKey(k) && !Objects.equals(generatedConfig.get(k), v)) {
+        LOG.info("Replacing generated config for key: {} value: {} with original config value: {}", k, generatedConfig.get(k), v);
+      }
+      mergedConfig.put(k, v);
+    });
 
     return ConfigUtil.rewriteConfig(new MapConfig(mergedConfig));
   }
@@ -243,20 +243,20 @@
 
     // Add side inputs to the inputs and mark the stream as bootstrap
     tables.values().forEach(tableDescriptor -> {
-        if (tableDescriptor instanceof LocalTableDescriptor) {
-          LocalTableDescriptor localTableDescriptor = (LocalTableDescriptor) tableDescriptor;
-          List<String> sideInputs = localTableDescriptor.getSideInputs();
-          if (sideInputs != null && !sideInputs.isEmpty()) {
-            sideInputs.stream()
-                .map(sideInput -> StreamUtil.getSystemStreamFromNameOrId(originalConfig, sideInput))
-                .forEach(systemStream -> {
-                    inputs.add(StreamUtil.getNameFromSystemStream(systemStream));
-                    generatedConfig.put(String.format(StreamConfig.STREAM_PREFIX + StreamConfig.BOOTSTRAP,
-                        systemStream.getSystem(), systemStream.getStream()), "true");
-                  });
-          }
+      if (tableDescriptor instanceof LocalTableDescriptor) {
+        LocalTableDescriptor localTableDescriptor = (LocalTableDescriptor) tableDescriptor;
+        List<String> sideInputs = localTableDescriptor.getSideInputs();
+        if (sideInputs != null && !sideInputs.isEmpty()) {
+          sideInputs.stream()
+              .map(sideInput -> StreamUtil.getSystemStreamFromNameOrId(originalConfig, sideInput))
+              .forEach(systemStream -> {
+                inputs.add(StreamUtil.getNameFromSystemStream(systemStream));
+                generatedConfig.put(String.format(StreamConfig.STREAM_PREFIX + StreamConfig.BOOTSTRAP,
+                    systemStream.getSystem(), systemStream.getStream()), "true");
+              });
         }
-      });
+      }
+    });
   }
 
   /**
@@ -285,15 +285,15 @@
     Map<String, Serde> storeKeySerdes = new HashMap<>();
     Map<String, Serde> storeMsgSerdes = new HashMap<>();
     stores.forEach(storeDescriptor -> {
-        storeKeySerdes.put(storeDescriptor.getStoreName(), storeDescriptor.getKeySerde());
-        storeMsgSerdes.put(storeDescriptor.getStoreName(), storeDescriptor.getMsgSerde());
-      });
+      storeKeySerdes.put(storeDescriptor.getStoreName(), storeDescriptor.getKeySerde());
+      storeMsgSerdes.put(storeDescriptor.getStoreName(), storeDescriptor.getMsgSerde());
+    });
 
     Map<String, Serde> tableKeySerdes = new HashMap<>();
     Map<String, Serde> tableMsgSerdes = new HashMap<>();
     tables.forEach(tableId -> {
-        addSerdes(jobNode.getTableSerdes(tableId), tableId, tableKeySerdes, tableMsgSerdes);
-      });
+      addSerdes(jobNode.getTableSerdes(tableId), tableId, tableKeySerdes, tableMsgSerdes);
+    });
 
     // for each unique stream or store serde instance, generate a unique name and serialize to config
     HashSet<Serde> serdes = new HashSet<>(streamKeySerdes.values());
@@ -306,46 +306,46 @@
     Base64.Encoder base64Encoder = Base64.getEncoder();
     Map<Serde, String> serdeUUIDs = new HashMap<>();
     serdes.forEach(serde -> {
-        String serdeName = serdeUUIDs.computeIfAbsent(serde,
-            s -> serde.getClass().getSimpleName() + "-" + UUID.randomUUID().toString());
-        configs.putIfAbsent(String.format(SerializerConfig.SERDE_SERIALIZED_INSTANCE, serdeName),
-            base64Encoder.encodeToString(serializableSerde.toBytes(serde)));
-      });
+      String serdeName = serdeUUIDs.computeIfAbsent(serde,
+        s -> serde.getClass().getSimpleName() + "-" + UUID.randomUUID().toString());
+      configs.putIfAbsent(String.format(SerializerConfig.SERDE_SERIALIZED_INSTANCE, serdeName),
+          base64Encoder.encodeToString(serializableSerde.toBytes(serde)));
+    });
 
     // set key and msg serdes for streams to the serde names generated above
     streamKeySerdes.forEach((streamId, serde) -> {
-        String streamIdPrefix = String.format(StreamConfig.STREAM_ID_PREFIX, streamId);
-        String keySerdeConfigKey = streamIdPrefix + StreamConfig.KEY_SERDE;
-        configs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
-      });
+      String streamIdPrefix = String.format(StreamConfig.STREAM_ID_PREFIX, streamId);
+      String keySerdeConfigKey = streamIdPrefix + StreamConfig.KEY_SERDE;
+      configs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
+    });
 
     streamMsgSerdes.forEach((streamId, serde) -> {
-        String streamIdPrefix = String.format(StreamConfig.STREAM_ID_PREFIX, streamId);
-        String valueSerdeConfigKey = streamIdPrefix + StreamConfig.MSG_SERDE;
-        configs.put(valueSerdeConfigKey, serdeUUIDs.get(serde));
-      });
+      String streamIdPrefix = String.format(StreamConfig.STREAM_ID_PREFIX, streamId);
+      String valueSerdeConfigKey = streamIdPrefix + StreamConfig.MSG_SERDE;
+      configs.put(valueSerdeConfigKey, serdeUUIDs.get(serde));
+    });
 
     // set key and msg serdes for stores to the serde names generated above
     storeKeySerdes.forEach((storeName, serde) -> {
-        String keySerdeConfigKey = String.format(StorageConfig.KEY_SERDE, storeName);
-        configs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
-      });
+      String keySerdeConfigKey = String.format(StorageConfig.KEY_SERDE, storeName);
+      configs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
+    });
 
     storeMsgSerdes.forEach((storeName, serde) -> {
-        String msgSerdeConfigKey = String.format(StorageConfig.MSG_SERDE, storeName);
-        configs.put(msgSerdeConfigKey, serdeUUIDs.get(serde));
-      });
+      String msgSerdeConfigKey = String.format(StorageConfig.MSG_SERDE, storeName);
+      configs.put(msgSerdeConfigKey, serdeUUIDs.get(serde));
+    });
 
     // set key and msg serdes for stores to the serde names generated above
     tableKeySerdes.forEach((tableId, serde) -> {
-        String keySerdeConfigKey = String.format(JavaTableConfig.STORE_KEY_SERDE, tableId);
-        configs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
-      });
+      String keySerdeConfigKey = String.format(JavaTableConfig.STORE_KEY_SERDE, tableId);
+      configs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
+    });
 
     tableMsgSerdes.forEach((tableId, serde) -> {
-        String valueSerdeConfigKey = String.format(JavaTableConfig.STORE_MSG_SERDE, tableId);
-        configs.put(valueSerdeConfigKey, serdeUUIDs.get(serde));
-      });
+      String valueSerdeConfigKey = String.format(JavaTableConfig.STORE_MSG_SERDE, tableId);
+      configs.put(valueSerdeConfigKey, serdeUUIDs.get(serde));
+    });
   }
 
   private void addSerdes(KV<Serde, Serde> serdes, String streamId, Map<String, Serde> keySerdeMap,
diff --git a/samza-core/src/main/java/org/apache/samza/execution/JobPlanner.java b/samza-core/src/main/java/org/apache/samza/execution/JobPlanner.java
index 72c2972..416a769 100644
--- a/samza-core/src/main/java/org/apache/samza/execution/JobPlanner.java
+++ b/samza-core/src/main/java/org/apache/samza/execution/JobPlanner.java
@@ -83,13 +83,20 @@
       if (StringUtils.isBlank(userConfig.get(TaskConfig.INPUT_STREAMS))) {
         allowedUserConfig.remove(TaskConfig.INPUT_STREAMS);
       }
-      generatedConfig.putAll(getGeneratedConfig(runId));
+      generatedConfig.putAll(getGeneratedConfig());
     }
 
     if (ApplicationConfig.ApplicationMode.BATCH.name().equals(generatedConfig.get(ApplicationConfig.APP_MODE))) {
       allowedUserConfig.remove(ClusterManagerConfig.JOB_HOST_AFFINITY_ENABLED);
     }
 
+    // APP_RUN_ID should be generated for both LegacyTaskApplications & descriptor based applications.
+    // This config is used in BATCH mode to create new intermediate streams on each run, and in STREAM mode it is
+    // used by Container Placements to identify a deployment of Samza.
+    if (StringUtils.isNoneEmpty(runId)) {
+      generatedConfig.put(ApplicationConfig.APP_RUN_ID, runId);
+    }
+
     // merge user-provided configuration with generated configuration. generated configuration has lower priority.
     Config mergedConfig = JobNodeConfigurationGenerator.mergeConfig(allowedUserConfig, generatedConfig);
 
@@ -125,11 +132,8 @@
     }
   }
 
-  private Map<String, String> getGeneratedConfig(String runId) {
+  private Map<String, String> getGeneratedConfig() {
     Map<String, String> generatedConfig = new HashMap<>();
-    if (StringUtils.isNoneEmpty(runId)) {
-      generatedConfig.put(ApplicationConfig.APP_RUN_ID, runId);
-    }
 
     Map<String, String> systemStreamConfigs = generateSystemStreamConfigs(appDesc);
     generatedConfig.putAll(systemStreamConfigs);
diff --git a/samza-core/src/main/java/org/apache/samza/execution/StreamEdge.java b/samza-core/src/main/java/org/apache/samza/execution/StreamEdge.java
index 4999d06..63690b7 100644
--- a/samza-core/src/main/java/org/apache/samza/execution/StreamEdge.java
+++ b/samza-core/src/main/java/org/apache/samza/execution/StreamEdge.java
@@ -136,8 +136,8 @@
       streamConfig.put(String.format(StreamConfig.PRIORITY_FOR_STREAM_ID, streamId), String.valueOf(Integer.MAX_VALUE));
     }
     spec.getConfig().forEach((property, value) -> {
-        streamConfig.put(String.format(StreamConfig.STREAM_ID_PREFIX, streamId) + property, value);
-      });
+      streamConfig.put(String.format(StreamConfig.STREAM_ID_PREFIX, streamId) + property, value);
+    });
 
     return new MapConfig(streamConfig);
   }
diff --git a/samza-core/src/main/java/org/apache/samza/execution/StreamManager.java b/samza-core/src/main/java/org/apache/samza/execution/StreamManager.java
index 26bc348..f75a8dc 100644
--- a/samza-core/src/main/java/org/apache/samza/execution/StreamManager.java
+++ b/samza-core/src/main/java/org/apache/samza/execution/StreamManager.java
@@ -117,9 +117,9 @@
           .map(id -> new StreamSpec(id, streamConfig.getPhysicalName(id), streamConfig.getSystem(id)))
           .collect(Collectors.toSet());
       intStreams.forEach(stream -> {
-          LOGGER.info("Clear intermediate stream {} in system {}", stream.getPhysicalName(), stream.getSystemName());
-          systemAdmins.getSystemAdmin(stream.getSystemName()).clearStream(stream);
-        });
+        LOGGER.info("Clear intermediate stream {} in system {}", stream.getPhysicalName(), stream.getSystemName());
+        systemAdmins.getSystemAdmin(stream.getSystemName()).clearStream(stream);
+      });
 
       //Find checkpoint stream and clean up
       TaskConfig taskConfig = new TaskConfig(prevConfig);
diff --git a/samza-core/src/main/java/org/apache/samza/operators/impl/ControlMessageSender.java b/samza-core/src/main/java/org/apache/samza/operators/impl/ControlMessageSender.java
index d4782b0..779644d 100644
--- a/samza-core/src/main/java/org/apache/samza/operators/impl/ControlMessageSender.java
+++ b/samza-core/src/main/java/org/apache/samza/operators/impl/ControlMessageSender.java
@@ -75,11 +75,11 @@
 
   private int getPartitionCount(SystemStream systemStream) {
     return PARTITION_COUNT_CACHE.computeIfAbsent(systemStream, ss -> {
-        SystemStreamMetadata metadata = metadataCache.getSystemStreamMetadata(ss, true);
-        if (metadata == null) {
-          throw new SamzaException("Unable to find metadata for stream " + systemStream);
-        }
-        return metadata.getSystemStreamPartitionMetadata().size();
-      });
+      SystemStreamMetadata metadata = metadataCache.getSystemStreamMetadata(ss, true);
+      if (metadata == null) {
+        throw new SamzaException("Unable to find metadata for stream " + systemStream);
+      }
+      return metadata.getSystemStreamPartitionMetadata().size();
+    });
   }
 }
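
Reviewer note: the re-indented getPartitionCount above is a memoized lookup. A generic, self-contained sketch of the same computeIfAbsent pattern (class, method and stream names invented; plain JDK only) is shown below.

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.ToIntFunction;

    // Illustrative only: cache an expensive per-key lookup. The mapping function runs
    // at most once per key; if it throws, no mapping is recorded, so the lookup is
    // retried on the next call.
    public final class PartitionCountCache {
      private final Map<String, Integer> cache = new ConcurrentHashMap<>();

      public int get(String stream, ToIntFunction<String> expensiveLookup) {
        return cache.computeIfAbsent(stream, s -> {
          int count = expensiveLookup.applyAsInt(s);
          if (count <= 0) {
            throw new IllegalStateException("Unable to find metadata for stream " + s);
          }
          return count;
        });
      }

      public static void main(String[] args) {
        PartitionCountCache cache = new PartitionCountCache();
        System.out.println(cache.get("pageviews", s -> 8));   // computes and caches 8
        System.out.println(cache.get("pageviews", s -> -1));  // served from cache: still 8
      }
    }
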
diff --git a/samza-core/src/main/java/org/apache/samza/operators/impl/EndOfStreamStates.java b/samza-core/src/main/java/org/apache/samza/operators/impl/EndOfStreamStates.java
index 8c9db61..7d9c597 100644
--- a/samza-core/src/main/java/org/apache/samza/operators/impl/EndOfStreamStates.java
+++ b/samza-core/src/main/java/org/apache/samza/operators/impl/EndOfStreamStates.java
@@ -75,8 +75,8 @@
   EndOfStreamStates(Set<SystemStreamPartition> ssps, Map<SystemStream, Integer> producerTaskCounts) {
     Map<SystemStreamPartition, EndOfStreamState> states = new HashMap<>();
     ssps.forEach(ssp -> {
-        states.put(ssp, new EndOfStreamState(producerTaskCounts.getOrDefault(ssp.getSystemStream(), 0)));
-      });
+      states.put(ssp, new EndOfStreamState(producerTaskCounts.getOrDefault(ssp.getSystemStream(), 0)));
+    });
     this.eosStates = Collections.unmodifiableMap(states);
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/operators/impl/OperatorImpl.java b/samza-core/src/main/java/org/apache/samza/operators/impl/OperatorImpl.java
index 528acc6..9fd35eb 100644
--- a/samza-core/src/main/java/org/apache/samza/operators/impl/OperatorImpl.java
+++ b/samza-core/src/main/java/org/apache/samza/operators/impl/OperatorImpl.java
@@ -182,14 +182,14 @@
     }
 
     CompletionStage<Void> result = completableResultsFuture.thenCompose(results -> {
-        long endNs = this.highResClock.nanoTime();
-        this.handleMessageNs.update(endNs - startNs);
+      long endNs = this.highResClock.nanoTime();
+      this.handleMessageNs.update(endNs - startNs);
 
-        return CompletableFuture.allOf(results.stream()
-            .flatMap(r -> this.registeredOperators.stream()
-              .map(op -> op.onMessageAsync(r, collector, coordinator)))
-            .toArray(CompletableFuture[]::new));
-      });
+      return CompletableFuture.allOf(results.stream()
+          .flatMap(r -> this.registeredOperators.stream()
+            .map(op -> op.onMessageAsync(r, collector, coordinator)))
+          .toArray(CompletableFuture[]::new));
+    });
 
     WatermarkFunction watermarkFn = getOperatorSpec().getWatermarkFn();
     if (watermarkFn != null) {
@@ -281,13 +281,13 @@
       // populate the end-of-stream through the dag
       endOfStreamFuture = onEndOfStream(collector, coordinator)
           .thenAccept(result -> {
-              if (eosStates.allEndOfStream()) {
-                // all inputs have been end-of-stream, shut down the task
-                LOG.info("All input streams have reached the end for task {}", taskName.getTaskName());
-                coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK);
-                coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
-              }
-            });
+            if (eosStates.allEndOfStream()) {
+              // all inputs have been end-of-stream, shut down the task
+              LOG.info("All input streams have reached the end for task {}", taskName.getTaskName());
+              coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK);
+              coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
+            }
+          });
     }
 
     return endOfStreamFuture;
@@ -485,24 +485,24 @@
       @Override
       public void schedule(K key, long time) {
         callbackScheduler.scheduleCallback(key, time, (k, collector, coordinator) -> {
-            final ScheduledFunction<K, RM> scheduledFn = getOperatorSpec().getScheduledFn();
-            if (scheduledFn != null) {
-              final Collection<RM> output = scheduledFn.onCallback(key, time);
+          final ScheduledFunction<K, RM> scheduledFn = getOperatorSpec().getScheduledFn();
+          if (scheduledFn != null) {
+            final Collection<RM> output = scheduledFn.onCallback(key, time);
 
-              if (!output.isEmpty()) {
-                CompletableFuture<Void> timerFuture = CompletableFuture.allOf(output.stream()
-                    .flatMap(r -> registeredOperators.stream()
-                        .map(op -> op.onMessageAsync(r, collector, coordinator)))
-                    .toArray(CompletableFuture[]::new));
+            if (!output.isEmpty()) {
+              CompletableFuture<Void> timerFuture = CompletableFuture.allOf(output.stream()
+                  .flatMap(r -> registeredOperators.stream()
+                      .map(op -> op.onMessageAsync(r, collector, coordinator)))
+                  .toArray(CompletableFuture[]::new));
 
-                timerFuture.join();
-              }
-            } else {
-              throw new SamzaException(
-                  String.format("Operator %s id %s (created at %s) must implement ScheduledFunction to use system timer.",
-                      getOperatorSpec().getOpCode().name(), getOpImplId(), getOperatorSpec().getSourceLocation()));
+              timerFuture.join();
             }
-          });
+          } else {
+            throw new SamzaException(
+                String.format("Operator %s id %s (created at %s) must implement ScheduledFunction to use system timer.",
+                    getOperatorSpec().getOpCode().name(), getOpImplId(), getOperatorSpec().getSourceLocation()));
+          }
+        });
       }
 
       @Override
diff --git a/samza-core/src/main/java/org/apache/samza/operators/impl/OperatorImplGraph.java b/samza-core/src/main/java/org/apache/samza/operators/impl/OperatorImplGraph.java
index 4cf7201..705f0cb 100644
--- a/samza-core/src/main/java/org/apache/samza/operators/impl/OperatorImplGraph.java
+++ b/samza-core/src/main/java/org/apache/samza/operators/impl/OperatorImplGraph.java
@@ -108,8 +108,8 @@
                 getIntermediateToInputStreamsMap(specGraph, streamConfig))
             : Collections.EMPTY_MAP;
     producerTaskCounts.forEach((stream, count) -> {
-        LOG.info("{} has {} producer tasks.", stream, count);
-      });
+      LOG.info("{} has {} producer tasks.", stream, count);
+    });
 
     // set states for end-of-stream
     internalTaskContext.registerObject(EndOfStreamStates.class.getName(),
@@ -124,11 +124,11 @@
                 context.getContainerContext().getContainerMetricsRegistry()));
 
     specGraph.getInputOperators().forEach((streamId, inputOpSpec) -> {
-        SystemStream systemStream = streamConfig.streamIdToSystemStream(streamId);
-        InputOperatorImpl inputOperatorImpl =
-            (InputOperatorImpl) createAndRegisterOperatorImpl(null, inputOpSpec, systemStream, context);
-        this.inputOperators.put(systemStream, inputOperatorImpl);
-      });
+      SystemStream systemStream = streamConfig.streamIdToSystemStream(streamId);
+      InputOperatorImpl inputOperatorImpl =
+          (InputOperatorImpl) createAndRegisterOperatorImpl(null, inputOpSpec, systemStream, context);
+      this.inputOperators.put(systemStream, inputOperatorImpl);
+    });
   }
 
   /**
@@ -187,10 +187,10 @@
 
       Collection<OperatorSpec> registeredSpecs = operatorSpec.getRegisteredOperatorSpecs();
       registeredSpecs.forEach(registeredSpec -> {
-          LOG.debug("Creating operator {} with opCode: {}", registeredSpec.getOpId(), registeredSpec.getOpCode());
-          OperatorImpl nextImpl = createAndRegisterOperatorImpl(operatorSpec, registeredSpec, inputStream, context);
-          operatorImpl.registerNextOperator(nextImpl);
-        });
+        LOG.debug("Creating operator {} with opCode: {}", registeredSpec.getOpId(), registeredSpec.getOpCode());
+        OperatorImpl nextImpl = createAndRegisterOperatorImpl(operatorSpec, registeredSpec, inputStream, context);
+        operatorImpl.registerNextOperator(nextImpl);
+      });
       return operatorImpl;
     } else {
       // the implementation corresponding to operatorSpec has already been instantiated and registered.
@@ -200,7 +200,7 @@
       // We still need to traverse the DAG further to register the input streams.
       Collection<OperatorSpec> registeredSpecs = operatorSpec.getRegisteredOperatorSpecs();
       registeredSpecs.forEach(
-          registeredSpec -> createAndRegisterOperatorImpl(operatorSpec, registeredSpec, inputStream, context));
+        registeredSpec -> createAndRegisterOperatorImpl(operatorSpec, registeredSpec, inputStream, context));
       return operatorImpl;
     }
   }
@@ -255,7 +255,7 @@
       Clock clock) {
     // get the per task pair of PartialJoinOperatorImpl for the corresponding {@code joinOpSpec}
     KV<PartialJoinOperatorImpl, PartialJoinOperatorImpl> partialJoinOpImpls = joinOpImpls.computeIfAbsent(joinOpSpec.getOpId(),
-        joinOpId -> {
+      joinOpId -> {
         PartialJoinFunction leftJoinFn = createLeftJoinFn(joinOpSpec);
         PartialJoinFunction rightJoinFn = createRightJoinFn(joinOpSpec);
         return new KV(new PartialJoinOperatorImpl(joinOpSpec, true, leftJoinFn, rightJoinFn, clock),
@@ -365,12 +365,12 @@
   static Multimap<SystemStream, String> getStreamToConsumerTasks(JobModel jobModel) {
     Multimap<SystemStream, String> streamToConsumerTasks = HashMultimap.create();
     jobModel.getContainers().values().forEach(containerModel -> {
-        containerModel.getTasks().values().forEach(taskModel -> {
-            taskModel.getSystemStreamPartitions().forEach(ssp -> {
-                streamToConsumerTasks.put(ssp.getSystemStream(), taskModel.getTaskName().getTaskName());
-              });
-          });
+      containerModel.getTasks().values().forEach(taskModel -> {
+        taskModel.getSystemStreamPartitions().forEach(ssp -> {
+          streamToConsumerTasks.put(ssp.getSystemStream(), taskModel.getTaskName().getTaskName());
+        });
       });
+    });
     return streamToConsumerTasks;
   }
 
@@ -384,9 +384,9 @@
     Multimap<SystemStream, SystemStream> outputToInputStreams = HashMultimap.create();
     specGraph.getInputOperators().entrySet().stream()
         .forEach(entry -> {
-            SystemStream systemStream = streamConfig.streamIdToSystemStream(entry.getKey());
-            computeOutputToInput(systemStream, entry.getValue(), outputToInputStreams, streamConfig);
-          });
+          SystemStream systemStream = streamConfig.streamIdToSystemStream(entry.getKey());
+          computeOutputToInput(systemStream, entry.getValue(), outputToInputStreams, streamConfig);
+        });
     return outputToInputStreams;
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/operators/impl/WatermarkMetrics.java b/samza-core/src/main/java/org/apache/samza/operators/impl/WatermarkMetrics.java
index 657ba2a..2104c4e 100644
--- a/samza-core/src/main/java/org/apache/samza/operators/impl/WatermarkMetrics.java
+++ b/samza-core/src/main/java/org/apache/samza/operators/impl/WatermarkMetrics.java
@@ -36,8 +36,8 @@
 
   void setAggregateTime(SystemStreamPartition systemStreamPartition, long time) {
     final Gauge<Long> aggregate = aggregates.computeIfAbsent(systemStreamPartition,
-        ssp -> newGauge(String.format("%s-%s-aggr-watermark",
-        ssp.getStream(), ssp.getPartition().getPartitionId()), 0L));
+      ssp -> newGauge(String.format("%s-%s-aggr-watermark",
+          ssp.getStream(), ssp.getPartition().getPartitionId()), 0L));
     aggregate.set(time);
   }
 }
diff --git a/samza-core/src/main/java/org/apache/samza/operators/impl/WatermarkStates.java b/samza-core/src/main/java/org/apache/samza/operators/impl/WatermarkStates.java
index b363b2c..84e0687 100644
--- a/samza-core/src/main/java/org/apache/samza/operators/impl/WatermarkStates.java
+++ b/samza-core/src/main/java/org/apache/samza/operators/impl/WatermarkStates.java
@@ -94,12 +94,12 @@
     final List<SystemStreamPartition> intSsps = new ArrayList<>();
 
     ssps.forEach(ssp -> {
-        final int producerCount = producerTaskCounts.getOrDefault(ssp.getSystemStream(), 0);
-        states.put(ssp, new WatermarkState(producerCount));
-        if (producerCount != 0) {
-          intSsps.add(ssp);
-        }
-      });
+      final int producerCount = producerTaskCounts.getOrDefault(ssp.getSystemStream(), 0);
+      states.put(ssp, new WatermarkState(producerCount));
+      if (producerCount != 0) {
+        intSsps.add(ssp);
+      }
+    });
     this.watermarkStates = Collections.unmodifiableMap(states);
     this.watermarkMetrics = new WatermarkMetrics(metricsRegistry);
     this.intermediateSsps = Collections.unmodifiableList(intSsps);
diff --git a/samza-core/src/main/java/org/apache/samza/operators/triggers/TimeSinceFirstMessageTriggerImpl.java b/samza-core/src/main/java/org/apache/samza/operators/triggers/TimeSinceFirstMessageTriggerImpl.java
index 32bf988..b3df99d 100644
--- a/samza-core/src/main/java/org/apache/samza/operators/triggers/TimeSinceFirstMessageTriggerImpl.java
+++ b/samza-core/src/main/java/org/apache/samza/operators/triggers/TimeSinceFirstMessageTriggerImpl.java
@@ -51,9 +51,9 @@
       long triggerDurationMs = trigger.getDuration().toMillis();
       Long callbackTime = now + triggerDurationMs;
       cancellable =  context.scheduleCallback(() -> {
-          LOG.trace("Time since first message trigger fired");
-          shouldFire = true;
-        }, callbackTime, triggerKey);
+        LOG.trace("Time since first message trigger fired");
+        shouldFire = true;
+      }, callbackTime, triggerKey);
     }
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/operators/triggers/TimeTriggerImpl.java b/samza-core/src/main/java/org/apache/samza/operators/triggers/TimeTriggerImpl.java
index 2454ce9..e8a4fe2 100644
--- a/samza-core/src/main/java/org/apache/samza/operators/triggers/TimeTriggerImpl.java
+++ b/samza-core/src/main/java/org/apache/samza/operators/triggers/TimeTriggerImpl.java
@@ -51,9 +51,9 @@
 
     if (cancellable == null) {
       cancellable = context.scheduleCallback(() -> {
-          LOG.trace("Time trigger fired");
-          shouldFire = true;
-        }, callbackTime, triggerKey);
+        LOG.trace("Time trigger fired");
+        shouldFire = true;
+      }, callbackTime, triggerKey);
     }
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/runtime/ContainerLaunchUtil.java b/samza-core/src/main/java/org/apache/samza/runtime/ContainerLaunchUtil.java
index 40d3d5c..4470ae7 100644
--- a/samza-core/src/main/java/org/apache/samza/runtime/ContainerLaunchUtil.java
+++ b/samza-core/src/main/java/org/apache/samza/runtime/ContainerLaunchUtil.java
@@ -52,6 +52,7 @@
 import org.apache.samza.util.ScalaJavaUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.slf4j.MDC;
 import scala.Option;
 
 
@@ -78,6 +79,12 @@
       String jobName, String jobId, String containerId, Optional<String> execEnvContainerId,
       JobModel jobModel) {
 
+    // populate MDC for logging
+    MDC.put("containerName", "samza-container-" + containerId);
+    MDC.put("jobName", jobName);
+    MDC.put("jobId", jobId);
+
+
     Config config = jobModel.getConfig();
     DiagnosticsUtil.writeMetadataFile(jobName, jobId, containerId, execEnvContainerId, config);
     run(appDesc, jobName, jobId, containerId, execEnvContainerId, jobModel, config, buildExternalContext(config));
@@ -190,14 +197,14 @@
     if (executionEnvContainerId != null) {
       log.info("Got execution environment container id: {}", executionEnvContainerId);
       return new ContainerHeartbeatMonitor(() -> {
-          try {
-            container.shutdown();
-            containerRunnerException = new SamzaException("Container shutdown due to expired heartbeat");
-          } catch (Exception e) {
-            log.error("Heartbeat monitor failed to shutdown the container gracefully. Exiting process.", e);
-            System.exit(1);
-          }
-        }, new ContainerHeartbeatClient(coordinatorUrl, executionEnvContainerId));
+        try {
+          container.shutdown();
+          containerRunnerException = new SamzaException("Container shutdown due to expired heartbeat");
+        } catch (Exception e) {
+          log.error("Heartbeat monitor failed to shutdown the container gracefully. Exiting process.", e);
+          System.exit(1);
+        }
+      }, new ContainerHeartbeatClient(coordinatorUrl, executionEnvContainerId));
     } else {
       log.warn("Execution environment container id not set. Container heartbeat monitor will not be created");
       return null;
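
The ContainerLaunchUtil change above populates the MDC with containerName, jobName, and jobId before the container runs, so log lines can be tagged with the container identity. A minimal sketch (not part of this patch; class name and appender pattern are hypothetical) of how such MDC keys are typically consumed:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;

public class MdcLoggingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(MdcLoggingSketch.class);

  public static void main(String[] args) {
    // Values put into the MDC are attached to every log statement on the same thread, so an
    // appender pattern such as "%X{jobName}/%X{jobId}/%X{containerName}" (hypothetical
    // configuration) stamps each line with the container identity.
    MDC.put("containerName", "samza-container-0");
    MDC.put("jobName", "my-job");
    MDC.put("jobId", "1");
    try {
      LOG.info("starting container"); // carries the three MDC keys set above
    } finally {
      MDC.clear(); // avoid leaking context onto reused threads
    }
  }
}
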
diff --git a/samza-core/src/main/java/org/apache/samza/runtime/LocalApplicationRunner.java b/samza-core/src/main/java/org/apache/samza/runtime/LocalApplicationRunner.java
index 8c3c029..37b2c0b 100644
--- a/samza-core/src/main/java/org/apache/samza/runtime/LocalApplicationRunner.java
+++ b/samza-core/src/main/java/org/apache/samza/runtime/LocalApplicationRunner.java
@@ -226,15 +226,15 @@
         throw new SamzaException("No jobs to run.");
       }
       jobConfigs.forEach(jobConfig -> {
-          LOG.debug("Starting job {} StreamProcessor with config {}", jobConfig.getName(), jobConfig);
-          MetadataStore coordinatorStreamStore = createCoordinatorStreamStore(jobConfig);
-          if (coordinatorStreamStore != null) {
-            coordinatorStreamStore.init();
-          }
-          StreamProcessor processor = createStreamProcessor(jobConfig, appDesc,
-              sp -> new LocalStreamProcessorLifecycleListener(sp, jobConfig), Optional.ofNullable(externalContext), coordinatorStreamStore);
-          processors.add(Pair.of(processor, coordinatorStreamStore));
-        });
+        LOG.debug("Starting job {} StreamProcessor with config {}", jobConfig.getName(), jobConfig);
+        MetadataStore coordinatorStreamStore = createCoordinatorStreamStore(jobConfig);
+        if (coordinatorStreamStore != null) {
+          coordinatorStreamStore.init();
+        }
+        StreamProcessor processor = createStreamProcessor(jobConfig, appDesc,
+          sp -> new LocalStreamProcessorLifecycleListener(sp, jobConfig), Optional.ofNullable(externalContext), coordinatorStreamStore);
+        processors.add(Pair.of(processor, coordinatorStreamStore));
+      });
       numProcessorsToStart.set(processors.size());
 
       // start the StreamProcessors
@@ -251,13 +251,13 @@
   @Override
   public void kill() {
     processors.forEach(sp -> {
-        sp.getLeft().stop();    // Stop StreamProcessor
+      sp.getLeft().stop();    // Stop StreamProcessor
 
-        // Coordinator stream isn't required so a null check is necessary
-        if (sp.getRight() != null) {
-          sp.getRight().close();  // Close associated coordinator metadata store
-        }
-      });
+      // Coordinator stream isn't required so a null check is necessary
+      if (sp.getRight() != null) {
+        sp.getRight().close();  // Close associated coordinator metadata store
+      }
+    });
     cleanup();
   }
 
@@ -448,9 +448,9 @@
       if (failure.compareAndSet(null, t)) {
         // shutdown the other processors
         processors.forEach(sp -> {
-            sp.getLeft().stop();    // Stop StreamProcessor
-            sp.getRight().close();  // Close associated coordinator metadata store
-          });
+          sp.getLeft().stop();    // Stop StreamProcessor
+          sp.getRight().close();  // Close associated coordinator metadata store
+        });
       }
 
       // handle the current processor's shutdown failure.
diff --git a/samza-core/src/main/java/org/apache/samza/runtime/LocalContainerRunner.java b/samza-core/src/main/java/org/apache/samza/runtime/LocalContainerRunner.java
index f8d5e40..4b2cba0 100644
--- a/samza-core/src/main/java/org/apache/samza/runtime/LocalContainerRunner.java
+++ b/samza-core/src/main/java/org/apache/samza/runtime/LocalContainerRunner.java
@@ -34,7 +34,6 @@
 import org.apache.samza.util.SamzaUncaughtExceptionHandler;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.slf4j.MDC;
 
 
 /**
@@ -67,9 +66,6 @@
     String jobName = jobConfig.getName()
         .orElseThrow(() -> new SamzaException(String.format("Config %s is missing", JobConfig.JOB_NAME)));
     String jobId = jobConfig.getJobId();
-    MDC.put("containerName", "samza-container-" + containerId);
-    MDC.put("jobName", jobName);
-    MDC.put("jobId", jobId);
 
     ApplicationDescriptorImpl<? extends ApplicationDescriptor> appDesc =
         ApplicationDescriptorUtil.getAppDescriptor(ApplicationUtil.fromConfig(config), config);
diff --git a/samza-core/src/main/java/org/apache/samza/runtime/RemoteApplicationRunner.java b/samza-core/src/main/java/org/apache/samza/runtime/RemoteApplicationRunner.java
index 3af5db1..bb4ea18 100644
--- a/samza-core/src/main/java/org/apache/samza/runtime/RemoteApplicationRunner.java
+++ b/samza-core/src/main/java/org/apache/samza/runtime/RemoteApplicationRunner.java
@@ -77,10 +77,10 @@
 
       // 3. submit jobs for remote execution
       jobConfigs.forEach(jobConfig -> {
-          LOG.info("Starting job {} with config {}", jobConfig.getName(), jobConfig);
-          JobRunner runner = new JobRunner(jobConfig);
-          runner.run(true);
-        });
+        LOG.info("Starting job {} with config {}", jobConfig.getName(), jobConfig);
+        JobRunner runner = new JobRunner(jobConfig);
+        runner.run(true);
+      });
     } catch (Throwable t) {
       throw new SamzaException("Failed to run application", t);
     }
diff --git a/samza-core/src/main/java/org/apache/samza/scheduler/EpochTimeScheduler.java b/samza-core/src/main/java/org/apache/samza/scheduler/EpochTimeScheduler.java
index cbebbde..4e4bdf1 100644
--- a/samza-core/src/main/java/org/apache/samza/scheduler/EpochTimeScheduler.java
+++ b/samza-core/src/main/java/org/apache/samza/scheduler/EpochTimeScheduler.java
@@ -19,14 +19,15 @@
 
 package org.apache.samza.scheduler;
 
+import com.google.common.annotations.VisibleForTesting;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
-
-import static com.google.common.base.Preconditions.checkState;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Per-task scheduler for keyed timers.
@@ -36,7 +37,7 @@
  * 3) triggers listener whenever a timer fires.
  */
 public class EpochTimeScheduler {
-
+  private static final Logger LOG = LoggerFactory.getLogger(EpochTimeScheduler.class);
   /**
    * For run loop to listen to timer firing so it can schedule the callbacks.
    */
@@ -57,19 +58,43 @@
     this.executor = executor;
   }
 
+  @VisibleForTesting
+  Map<Object, ScheduledFuture> getScheduledFutures() {
+    return scheduledFutures;
+  }
+
   public <K> void setTimer(K key, long timestamp, ScheduledCallback<K> callback) {
-    checkState(!scheduledFutures.containsKey(key),
-        String.format("Duplicate key %s registration for the same timer", key));
+    if (scheduledFutures.containsKey(key)) {
+      LOG.warn("Registering duplicate callback for key: {}. Attempting to cancel the previous callback", key);
+      ScheduledFuture<?> scheduledFuture = scheduledFutures.get(key);
+
+      /*
+       * There is a race between the time we check for the presence of the key and the time we attempt to cancel:
+       * the executor may have already kicked off the callback for the key, which removes the future from the map
+       * before invoking onTimer. Hence we check that the future is non-null before acting on it.
+       *  1. If the callback is currently running, we do not attempt to interrupt it and cancel() will return
+       *     unsuccessful.
+       *  2. If the callback has already completed, we allow the duplicate registration to keep the behavior
+       *     consistent with the scenario where the callback had already executed or was in progress before we
+       *     entered this condition.
+       */
+      if (scheduledFuture != null
+          && !scheduledFuture.cancel(false)
+          && !scheduledFuture.isDone()) {
+        LOG.warn("Failed to cancel the previous callback successfully. Ignoring the current request to register new callback");
+        return;
+      }
+    }
 
     final long delay = timestamp - System.currentTimeMillis();
     final ScheduledFuture<?> scheduledFuture = executor.schedule(() -> {
-        scheduledFutures.remove(key);
-        readyTimers.put(TimerKey.of(key, timestamp), callback);
+      scheduledFutures.remove(key);
+      readyTimers.put(TimerKey.of(key, timestamp), callback);
 
-        if (timerListener != null) {
-          timerListener.onTimer();
-        }
-      }, delay > 0 ? delay : 0, TimeUnit.MILLISECONDS);
+      if (timerListener != null) {
+        timerListener.onTimer();
+      }
+    }, delay > 0 ? delay : 0, TimeUnit.MILLISECONDS);
     scheduledFutures.put(key, scheduledFuture);
   }
 
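
The setTimer change above replaces the duplicate-key checkState with a best-effort cancellation of the previously scheduled callback. A small standalone sketch (hypothetical class, not part of the patch) of the ScheduledFuture.cancel semantics that the comment relies on:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class TimerCancelSketch {
  public static void main(String[] args) {
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    ScheduledFuture<?> previous = executor.schedule(() -> { }, 5, TimeUnit.SECONDS);

    // cancel(false) never interrupts a callback that is already running, so the only case in
    // which a new registration for the same key must be dropped is: cancellation failed AND
    // the callback has not finished, i.e. it is still in flight.
    boolean cancelled = previous.cancel(false);
    if (!cancelled && !previous.isDone()) {
      System.out.println("previous callback still in flight; ignore the new registration");
    } else {
      System.out.println("previous callback cancelled or already finished; schedule the new one");
    }
    executor.shutdownNow();
  }
}
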
diff --git a/samza-core/src/main/java/org/apache/samza/storage/ChangelogStreamManager.java b/samza-core/src/main/java/org/apache/samza/storage/ChangelogStreamManager.java
index 4eed058..5eda128 100644
--- a/samza-core/src/main/java/org/apache/samza/storage/ChangelogStreamManager.java
+++ b/samza-core/src/main/java/org/apache/samza/storage/ChangelogStreamManager.java
@@ -74,12 +74,12 @@
     LOG.debug("Reading changelog partition information");
     final Map<TaskName, Integer> changelogMapping = new HashMap<>();
     metadataStore.all().forEach((taskName, partitionIdAsBytes) -> {
-        String partitionId = valueSerde.fromBytes(partitionIdAsBytes);
-        LOG.debug("TaskName: {} is mapped to {}", taskName, partitionId);
-        if (StringUtils.isNotBlank(partitionId)) {
-          changelogMapping.put(new TaskName(taskName), Integer.valueOf(partitionId));
-        }
-      });
+      String partitionId = valueSerde.fromBytes(partitionIdAsBytes);
+      LOG.debug("TaskName: {} is mapped to {}", taskName, partitionId);
+      if (StringUtils.isNotBlank(partitionId)) {
+        changelogMapping.put(new TaskName(taskName), Integer.valueOf(partitionId));
+      }
+    });
     return changelogMapping;
   }
 
@@ -129,47 +129,47 @@
     StorageConfig storageConfig = new StorageConfig(config);
     ImmutableMap.Builder<String, SystemStream> storeNameSystemStreamMapBuilder = new ImmutableMap.Builder<>();
     storageConfig.getStoreNames().forEach(storeName -> {
-        Optional<String> changelogStream = storageConfig.getChangelogStream(storeName);
-        if (changelogStream.isPresent() && StringUtils.isNotBlank(changelogStream.get())) {
-          storeNameSystemStreamMapBuilder.put(storeName, StreamUtil.getSystemStreamFromNames(changelogStream.get()));
-        }
-      });
+      Optional<String> changelogStream = storageConfig.getChangelogStream(storeName);
+      if (changelogStream.isPresent() && StringUtils.isNotBlank(changelogStream.get())) {
+        storeNameSystemStreamMapBuilder.put(storeName, StreamUtil.getSystemStreamFromNames(changelogStream.get()));
+      }
+    });
     Map<String, SystemStream> storeNameSystemStreamMapping = storeNameSystemStreamMapBuilder.build();
 
     // Get SystemAdmin for changelog store's system and attempt to create the stream
     SystemConfig systemConfig = new SystemConfig(config);
     storeNameSystemStreamMapping.forEach((storeName, systemStream) -> {
-        // Load system admin for this system.
-        SystemAdmin systemAdmin = systemConfig.getSystemAdmin(systemStream.getSystem());
+      // Load system admin for this system.
+      SystemAdmin systemAdmin = systemConfig.getSystemAdmin(systemStream.getSystem());
 
-        if (systemAdmin == null) {
-          throw new SamzaException(String.format(
-              "Error creating changelog. Changelog on store %s uses system %s, which is missing from the configuration.",
-              storeName, systemStream.getSystem()));
-        }
+      if (systemAdmin == null) {
+        throw new SamzaException(String.format(
+            "Error creating changelog. Changelog on store %s uses system %s, which is missing from the configuration.",
+            storeName, systemStream.getSystem()));
+      }
 
-        StreamSpec changelogSpec =
-            StreamSpec.createChangeLogStreamSpec(systemStream.getStream(), systemStream.getSystem(),
-                maxChangeLogStreamPartitions);
+      StreamSpec changelogSpec =
+          StreamSpec.createChangeLogStreamSpec(systemStream.getStream(), systemStream.getSystem(),
+              maxChangeLogStreamPartitions);
 
-        systemAdmin.start();
+      systemAdmin.start();
 
-        if (systemAdmin.createStream(changelogSpec)) {
-          LOG.info(String.format("created changelog stream %s.", systemStream.getStream()));
-        } else {
-          LOG.info(String.format("changelog stream %s already exists.", systemStream.getStream()));
-        }
-        systemAdmin.validateStream(changelogSpec);
+      if (systemAdmin.createStream(changelogSpec)) {
+        LOG.info(String.format("created changelog stream %s.", systemStream.getStream()));
+      } else {
+        LOG.info(String.format("changelog stream %s already exists.", systemStream.getStream()));
+      }
+      systemAdmin.validateStream(changelogSpec);
 
-        if (storageConfig.getAccessLogEnabled(storeName)) {
-          String accesslogStream = storageConfig.getAccessLogStream(systemStream.getStream());
-          StreamSpec accesslogSpec =
-              new StreamSpec(accesslogStream, accesslogStream, systemStream.getSystem(), maxChangeLogStreamPartitions);
-          systemAdmin.createStream(accesslogSpec);
-          systemAdmin.validateStream(accesslogSpec);
-        }
+      if (storageConfig.getAccessLogEnabled(storeName)) {
+        String accesslogStream = storageConfig.getAccessLogStream(systemStream.getStream());
+        StreamSpec accesslogSpec =
+            new StreamSpec(accesslogStream, accesslogStream, systemStream.getSystem(), maxChangeLogStreamPartitions);
+        systemAdmin.createStream(accesslogSpec);
+        systemAdmin.validateStream(accesslogSpec);
+      }
 
-        systemAdmin.stop();
-      });
+      systemAdmin.stop();
+    });
   }
 }
diff --git a/samza-core/src/main/java/org/apache/samza/storage/NonTransactionalStateTaskRestoreManager.java b/samza-core/src/main/java/org/apache/samza/storage/NonTransactionalStateTaskRestoreManager.java
index d2f0097..44dd59a 100644
--- a/samza-core/src/main/java/org/apache/samza/storage/NonTransactionalStateTaskRestoreManager.java
+++ b/samza-core/src/main/java/org/apache/samza/storage/NonTransactionalStateTaskRestoreManager.java
@@ -127,37 +127,37 @@
 
     FileUtil fileUtil = new FileUtil();
     taskStores.forEach((storeName, storageEngine) -> {
-        if (!storageEngine.getStoreProperties().isLoggedStore()) {
-          File nonLoggedStorePartitionDir =
-              storageManagerUtil.getTaskStoreDir(nonLoggedStoreBaseDirectory, storeName, taskModel.getTaskName(), taskModel.getTaskMode());
-          LOG.info("Got non logged storage partition directory as " + nonLoggedStorePartitionDir.toPath().toString());
+      if (!storageEngine.getStoreProperties().isLoggedStore()) {
+        File nonLoggedStorePartitionDir =
+            storageManagerUtil.getTaskStoreDir(nonLoggedStoreBaseDirectory, storeName, taskModel.getTaskName(), taskModel.getTaskMode());
+        LOG.info("Got non logged storage partition directory as " + nonLoggedStorePartitionDir.toPath().toString());
 
-          if (nonLoggedStorePartitionDir.exists()) {
-            LOG.info("Deleting non logged storage partition directory " + nonLoggedStorePartitionDir.toPath().toString());
-            fileUtil.rm(nonLoggedStorePartitionDir);
-          }
+        if (nonLoggedStorePartitionDir.exists()) {
+          LOG.info("Deleting non logged storage partition directory " + nonLoggedStorePartitionDir.toPath().toString());
+          fileUtil.rm(nonLoggedStorePartitionDir);
+        }
+      } else {
+        File loggedStorePartitionDir =
+            storageManagerUtil.getTaskStoreDir(loggedStoreBaseDirectory, storeName, taskModel.getTaskName(), taskModel.getTaskMode());
+        LOG.info("Got logged storage partition directory as " + loggedStorePartitionDir.toPath().toString());
+
+        // Delete the logged store if it is not valid.
+        if (!isLoggedStoreValid(storeName, loggedStorePartitionDir) || storageConfig.getCleanLoggedStoreDirsOnStart(storeName)) {
+          LOG.info("Deleting logged storage partition directory " + loggedStorePartitionDir.toPath().toString());
+          fileUtil.rm(loggedStorePartitionDir);
         } else {
-          File loggedStorePartitionDir =
-              storageManagerUtil.getTaskStoreDir(loggedStoreBaseDirectory, storeName, taskModel.getTaskName(), taskModel.getTaskMode());
-          LOG.info("Got logged storage partition directory as " + loggedStorePartitionDir.toPath().toString());
 
-          // Delete the logged store if it is not valid.
-          if (!isLoggedStoreValid(storeName, loggedStorePartitionDir) || storageConfig.getCleanLoggedStoreDirsOnStart(storeName)) {
-            LOG.info("Deleting logged storage partition directory " + loggedStorePartitionDir.toPath().toString());
-            fileUtil.rm(loggedStorePartitionDir);
-          } else {
+          SystemStreamPartition changelogSSP = new SystemStreamPartition(changelogSystemStreams.get(storeName), taskModel.getChangelogPartition());
+          Map<SystemStreamPartition, String> offset =
+              storageManagerUtil.readOffsetFile(loggedStorePartitionDir, Collections.singleton(changelogSSP), false);
+          LOG.info("Read offset {} for the store {} from logged storage partition directory {}", offset, storeName, loggedStorePartitionDir);
 
-            SystemStreamPartition changelogSSP = new SystemStreamPartition(changelogSystemStreams.get(storeName), taskModel.getChangelogPartition());
-            Map<SystemStreamPartition, String> offset =
-                storageManagerUtil.readOffsetFile(loggedStorePartitionDir, Collections.singleton(changelogSSP), false);
-            LOG.info("Read offset {} for the store {} from logged storage partition directory {}", offset, storeName, loggedStorePartitionDir);
-
-            if (offset.containsKey(changelogSSP)) {
-              fileOffsets.put(changelogSSP, offset.get(changelogSSP));
-            }
+          if (offset.containsKey(changelogSSP)) {
+            fileOffsets.put(changelogSSP, offset.get(changelogSSP));
           }
         }
-      });
+      }
+    });
   }
 
   /**
@@ -188,25 +188,25 @@
   private void setupBaseDirs() {
     LOG.debug("Setting up base directories for stores.");
     taskStores.forEach((storeName, storageEngine) -> {
-        if (storageEngine.getStoreProperties().isLoggedStore()) {
+      if (storageEngine.getStoreProperties().isLoggedStore()) {
 
-          File loggedStorePartitionDir =
-              storageManagerUtil.getTaskStoreDir(loggedStoreBaseDirectory, storeName, taskModel.getTaskName(), taskModel.getTaskMode());
+        File loggedStorePartitionDir =
+            storageManagerUtil.getTaskStoreDir(loggedStoreBaseDirectory, storeName, taskModel.getTaskName(), taskModel.getTaskMode());
 
-          LOG.info("Using logged storage partition directory: " + loggedStorePartitionDir.toPath().toString()
-              + " for store: " + storeName);
+        LOG.info("Using logged storage partition directory: " + loggedStorePartitionDir.toPath().toString()
+            + " for store: " + storeName);
 
-          if (!loggedStorePartitionDir.exists()) {
-            loggedStorePartitionDir.mkdirs();
-          }
-        } else {
-          File nonLoggedStorePartitionDir =
-              storageManagerUtil.getTaskStoreDir(nonLoggedStoreBaseDirectory, storeName, taskModel.getTaskName(), taskModel.getTaskMode());
-          LOG.info("Using non logged storage partition directory: " + nonLoggedStorePartitionDir.toPath().toString()
-              + " for store: " + storeName);
-          nonLoggedStorePartitionDir.mkdirs();
+        if (!loggedStorePartitionDir.exists()) {
+          loggedStorePartitionDir.mkdirs();
         }
-      });
+      } else {
+        File nonLoggedStorePartitionDir =
+            storageManagerUtil.getTaskStoreDir(nonLoggedStoreBaseDirectory, storeName, taskModel.getTaskName(), taskModel.getTaskMode());
+        LOG.info("Using non logged storage partition directory: " + nonLoggedStorePartitionDir.toPath().toString()
+            + " for store: " + storeName);
+        nonLoggedStorePartitionDir.mkdirs();
+      }
+    });
   }
 
   /**
@@ -336,13 +336,13 @@
   public void stopPersistentStores() {
 
     Map<String, StorageEngine> persistentStores = this.taskStores.entrySet().stream().filter(e -> {
-        return e.getValue().getStoreProperties().isPersistedToDisk();
-      }).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
+      return e.getValue().getStoreProperties().isPersistedToDisk();
+    }).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
 
     persistentStores.forEach((storeName, storageEngine) -> {
-        storageEngine.stop();
-        this.taskStores.remove(storeName);
-      });
+      storageEngine.stop();
+      this.taskStores.remove(storeName);
+    });
     LOG.info("Stopped persistent stores {}", persistentStores);
   }
 }
diff --git a/samza-core/src/main/java/org/apache/samza/storage/StorageManagerUtil.java b/samza-core/src/main/java/org/apache/samza/storage/StorageManagerUtil.java
index 50d7950..c2ebe44 100644
--- a/samza-core/src/main/java/org/apache/samza/storage/StorageManagerUtil.java
+++ b/samza-core/src/main/java/org/apache/samza/storage/StorageManagerUtil.java
@@ -94,7 +94,8 @@
             + " The values between these offsets cannot be restored.", resumeOffset, oldestOffset);
       }
     }
-
+    LOG.info("Starting offset for SystemStreamPartition {} is {}, fileOffset: {}, oldestOffset from source: {}", ssp,
+        startingOffset, fileOffset, oldestOffset);
     return startingOffset;
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/storage/StorageRecovery.java b/samza-core/src/main/java/org/apache/samza/storage/StorageRecovery.java
index 5d34176..e292df9 100644
--- a/samza-core/src/main/java/org/apache/samza/storage/StorageRecovery.java
+++ b/samza-core/src/main/java/org/apache/samza/storage/StorageRecovery.java
@@ -116,15 +116,15 @@
 
     systemAdmins.start();
     this.containerStorageManagers.forEach((containerName, containerStorageManager) -> {
-        try {
-          containerStorageManager.start();
-        } catch (InterruptedException e) {
-          // we can ignore the exception since its only used in the context of a command line tool and bubbling the
-          // exception upstream isn't needed.
-          LOG.warn("Received an interrupt during store restoration for container {}."
-              + " Proceeding with the next container", containerName);
-        }
-      });
+      try {
+        containerStorageManager.start();
+      } catch (InterruptedException e) {
+        // we can ignore the exception since it's only used in the context of a command line tool and bubbling the
+        // exception upstream isn't needed.
+        LOG.warn("Received an interrupt during store restoration for container {}."
+            + " Proceeding with the next container", containerName);
+      }
+    });
     this.containerStorageManagers.forEach((containerName, containerStorageManager) -> containerStorageManager.shutdown());
     systemAdmins.stop();
 
@@ -201,13 +201,13 @@
     // Adding all serdes from factories
     serializerConfig.getSerdeNames()
         .forEach(serdeName -> {
-            String serdeClassName = serializerConfig.getSerdeFactoryClass(serdeName)
-              .orElseGet(() -> SerializerConfig.getPredefinedSerdeFactoryName(serdeName));
-            @SuppressWarnings("unchecked")
-            Serde<Object> serde =
-                ReflectionUtil.getObj(serdeClassName, SerdeFactory.class).getSerde(serdeName, serializerConfig);
-            serdeMap.put(serdeName, serde);
-          });
+          String serdeClassName = serializerConfig.getSerdeFactoryClass(serdeName)
+            .orElseGet(() -> SerializerConfig.getPredefinedSerdeFactoryName(serdeName));
+          @SuppressWarnings("unchecked")
+          Serde<Object> serde =
+              ReflectionUtil.getObj(serdeClassName, SerdeFactory.class).getSerde(serdeName, serializerConfig);
+          serdeMap.put(serdeName, serde);
+        });
 
     return serdeMap;
   }
diff --git a/samza-core/src/main/java/org/apache/samza/storage/TaskSideInputHandler.java b/samza-core/src/main/java/org/apache/samza/storage/TaskSideInputHandler.java
new file mode 100644
index 0000000..7ab4036
--- /dev/null
+++ b/samza-core/src/main/java/org/apache/samza/storage/TaskSideInputHandler.java
@@ -0,0 +1,273 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.samza.storage;
+
+
+import com.google.common.annotations.VisibleForTesting;
+import java.io.File;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.stream.Collectors;
+import org.apache.samza.Partition;
+import org.apache.samza.SamzaException;
+import org.apache.samza.container.TaskName;
+import org.apache.samza.job.model.TaskMode;
+import org.apache.samza.storage.kv.Entry;
+import org.apache.samza.storage.kv.KeyValueStore;
+import org.apache.samza.system.IncomingMessageEnvelope;
+import org.apache.samza.system.StreamMetadataCache;
+import org.apache.samza.system.SystemAdmins;
+import org.apache.samza.system.SystemStream;
+import org.apache.samza.system.SystemStreamMetadata;
+import org.apache.samza.system.SystemStreamPartition;
+import org.apache.samza.util.Clock;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.collection.JavaConverters;
+
+
+/**
+ * This class encapsulates all processing logic / state for all side input SSPs within a task.
+ */
+public class TaskSideInputHandler {
+  private static final Logger LOG = LoggerFactory.getLogger(TaskSideInputHandler.class);
+
+  private final StorageManagerUtil storageManagerUtil = new StorageManagerUtil();
+  private final Map<SystemStreamPartition, String> lastProcessedOffsets = new ConcurrentHashMap<>();
+
+  private final TaskName taskName;
+  private final TaskSideInputStorageManager taskSideInputStorageManager;
+  private final Map<SystemStreamPartition, Set<String>> sspToStores;
+  private final Map<String, SideInputsProcessor> storeToProcessor;
+  private final SystemAdmins systemAdmins;
+  private final StreamMetadataCache streamMetadataCache;
+
+  private Map<SystemStreamPartition, String> startingOffsets;
+
+  public TaskSideInputHandler(TaskName taskName, TaskMode taskMode, File storeBaseDir,
+      Map<String, StorageEngine> storeToStorageEngines, Map<String, Set<SystemStreamPartition>> storeToSSPs,
+      Map<String, SideInputsProcessor> storeToProcessor, SystemAdmins systemAdmins,
+      StreamMetadataCache streamMetadataCache, Clock clock) {
+    validateProcessorConfiguration(storeToSSPs.keySet(), storeToProcessor);
+
+    this.taskName = taskName;
+    this.systemAdmins = systemAdmins;
+    this.streamMetadataCache = streamMetadataCache;
+    this.storeToProcessor = storeToProcessor;
+
+    this.sspToStores = new HashMap<>();
+    storeToSSPs.forEach((store, ssps) -> {
+      for (SystemStreamPartition ssp: ssps) {
+        this.sspToStores.computeIfAbsent(ssp, key -> new HashSet<>());
+        this.sspToStores.computeIfPresent(ssp, (key, value) -> {
+          value.add(store);
+          return value;
+        });
+      }
+    });
+
+    this.taskSideInputStorageManager = new TaskSideInputStorageManager(taskName,
+        taskMode,
+        storeBaseDir,
+        storeToStorageEngines,
+        storeToSSPs,
+        clock);
+  }
+
+  /**
+   * The {@link TaskName} associated with this {@link TaskSideInputHandler}
+   *
+   * @return the task name for this handler
+   */
+  public TaskName getTaskName() {
+    return this.taskName;
+  }
+
+  /**
+   * Initializes the underlying {@link TaskSideInputStorageManager} and determines starting offsets for each SSP.
+   */
+  public void init() {
+    this.taskSideInputStorageManager.init();
+
+    Map<SystemStreamPartition, String> fileOffsets = this.taskSideInputStorageManager.getFileOffsets();
+    LOG.info("File offsets for the task {}: {}", taskName, fileOffsets);
+
+    this.lastProcessedOffsets.putAll(fileOffsets);
+    LOG.info("Last processed offsets for the task {}: {}", taskName, lastProcessedOffsets);
+
+    this.startingOffsets = getStartingOffsets(fileOffsets, getOldestOffsets());
+    LOG.info("Starting offsets for the task {}: {}", taskName, startingOffsets);
+  }
+
+  /**
+   * Processes the incoming side input message envelope and updates the last processed offset for its SSP.
+   * Synchronized in order to be exclusive with flush().
+   *
+   * @param envelope incoming envelope to be processed
+   */
+  public synchronized void process(IncomingMessageEnvelope envelope) {
+    SystemStreamPartition envelopeSSP = envelope.getSystemStreamPartition();
+    String envelopeOffset = envelope.getOffset();
+
+    for (String store: this.sspToStores.get(envelopeSSP)) {
+      SideInputsProcessor storeProcessor = this.storeToProcessor.get(store);
+      KeyValueStore keyValueStore = (KeyValueStore) this.taskSideInputStorageManager.getStore(store);
+      Collection<Entry<?, ?>> entriesToBeWritten = storeProcessor.process(envelope, keyValueStore);
+
+      // TODO: SAMZA-2255: optimize writes to side input stores
+      for (Entry entry : entriesToBeWritten) {
+        // If the key is null we ignore, if the value is null, we issue a delete, else we issue a put
+        if (entry.getKey() != null) {
+          if (entry.getValue() != null) {
+            keyValueStore.put(entry.getKey(), entry.getValue());
+          } else {
+            keyValueStore.delete(entry.getKey());
+          }
+        }
+      }
+    }
+
+    this.lastProcessedOffsets.put(envelopeSSP, envelopeOffset);
+  }
+
+  /**
+   * Flushes the underlying {@link TaskSideInputStorageManager}
+   * Synchronized in order to be exclusive with process().
+   */
+  public synchronized void flush() {
+    this.taskSideInputStorageManager.flush(this.lastProcessedOffsets);
+  }
+
+  /**
+   * Gets the starting offset for the given side input {@link SystemStreamPartition}.
+   *
+   * Note: The method doesn't respect {@link org.apache.samza.config.StreamConfig#CONSUMER_OFFSET_DEFAULT} and
+   * {@link org.apache.samza.config.StreamConfig#CONSUMER_RESET_OFFSET} configurations. It will use the local offset
+   * file if it is valid, else it will fall back to oldest offset in the stream.
+   *
+   * @param ssp side input system stream partition to get the starting offset for
+   * @return the starting offset
+   */
+  public String getStartingOffset(SystemStreamPartition ssp) {
+    return this.startingOffsets.get(ssp);
+  }
+
+  /**
+   * Gets the last processed offset for the given side input {@link SystemStreamPartition}.
+   *
+   * @param ssp side input system stream partition to get the last processed offset for
+   * @return the last processed offset
+   */
+  public String getLastProcessedOffset(SystemStreamPartition ssp) {
+    return this.lastProcessedOffsets.get(ssp);
+  }
+
+  /**
+   * Stops the underlying storage manager at the last processed offsets. Any pending and upcoming invocations
+   * of {@link #process} and {@link #flush} are assumed to have completed or ceased prior to calling this method.
+   */
+  public void stop() {
+    this.taskSideInputStorageManager.stop(this.lastProcessedOffsets);
+  }
+
+  /**
+   * Gets the starting offsets for the {@link SystemStreamPartition}s belonging to all the side input stores.
+   * If the local file offset is available and is greater than the oldest available offset from source, uses it,
+   * else falls back to oldest offset in the source.
+   *
+   * @param fileOffsets offsets from the local offset file
+   * @param oldestOffsets oldest offsets from the source
+   * @return a {@link Map} of {@link SystemStreamPartition} to offset
+   */
+  @VisibleForTesting
+  Map<SystemStreamPartition, String> getStartingOffsets(
+      Map<SystemStreamPartition, String> fileOffsets, Map<SystemStreamPartition, String> oldestOffsets) {
+    Map<SystemStreamPartition, String> startingOffsets = new HashMap<>();
+
+    this.sspToStores.keySet().forEach(ssp -> {
+      String fileOffset = fileOffsets.get(ssp);
+      String oldestOffset = oldestOffsets.get(ssp);
+
+      startingOffsets.put(ssp,
+          this.storageManagerUtil.getStartingOffset(
+              ssp, this.systemAdmins.getSystemAdmin(ssp.getSystem()), fileOffset, oldestOffset));
+    });
+
+    return startingOffsets;
+  }
+
+  /**
+   * Gets the oldest offset for the {@link SystemStreamPartition}s associated with all the store side inputs.
+   *   1. Groups the list of the SSPs based on system stream
+   *   2. Fetches the {@link SystemStreamMetadata} from {@link StreamMetadataCache}
+   *   3. Fetches the partition metadata for each system stream and populates the oldest offset
+   *      for the SSPs belonging to that system stream.
+   *
+   * @return a {@link Map} of {@link SystemStreamPartition} to their oldest offset. If partitionMetadata could not be
+   * obtained for any {@link SystemStreamPartition} the offset for it is populated as null.
+   */
+  @VisibleForTesting
+  Map<SystemStreamPartition, String> getOldestOffsets() {
+    Map<SystemStreamPartition, String> oldestOffsets = new HashMap<>();
+
+    // Step 1
+    Map<SystemStream, List<SystemStreamPartition>> systemStreamToSsp = this.sspToStores.keySet().stream()
+        .collect(Collectors.groupingBy(SystemStreamPartition::getSystemStream));
+
+    // Step 2
+    Map<SystemStream, SystemStreamMetadata> metadata = JavaConverters.mapAsJavaMapConverter(
+        this.streamMetadataCache.getStreamMetadata(
+            JavaConverters.asScalaSetConverter(systemStreamToSsp.keySet()).asScala().toSet(), false)).asJava();
+
+    // Step 3
+    metadata.forEach((systemStream, systemStreamMetadata) -> {
+
+      // get the partition metadata for each system stream
+      Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> partitionMetadata =
+          systemStreamMetadata.getSystemStreamPartitionMetadata();
+
+      // For SSPs belonging to the system stream, use the partition metadata to get the oldest offset.
+      // If partition metadata was not obtained for an SSP, its oldest offset is populated as null.
+      // Because of https://bugs.openjdk.java.net/browse/JDK-8148463, using a lambda here would NPE when getOldestOffset() is null.
+      for (SystemStreamPartition ssp : systemStreamToSsp.get(systemStream)) {
+        oldestOffsets.put(ssp, partitionMetadata.get(ssp.getPartition()).getOldestOffset());
+      }
+    });
+
+    return oldestOffsets;
+  }
+
+  /**
+   * Validates that each store has an associated {@link SideInputsProcessor}
+   */
+  private void validateProcessorConfiguration(Set<String> stores, Map<String, SideInputsProcessor> storeToProcessor) {
+    stores.forEach(storeName -> {
+      if (!storeToProcessor.containsKey(storeName)) {
+        throw new SamzaException(
+            String.format("Side inputs processor missing for store: %s.", storeName));
+      }
+    });
+  }
+}
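
As described in the javadoc of getStartingOffsets above, TaskSideInputHandler resolves each side input SSP's starting offset from the local offset file when available and falls back to the oldest offset in the source otherwise; the real code delegates to StorageManagerUtil.getStartingOffset with the system's SystemAdmin. A simplified, self-contained sketch of that fallback rule, assuming numeric offsets and hypothetical names (not part of the patch):

import java.util.HashMap;
import java.util.Map;

public class StartingOffsetSketch {
  // Prefer the locally checkpointed file offset when it is at least as new as the oldest
  // offset still retained by the source; otherwise restore from the oldest offset.
  // Assumption for this sketch: offsets are numeric strings comparable as longs.
  static String resolveStartingOffset(String fileOffset, String oldestOffset) {
    if (fileOffset == null) {
      return oldestOffset; // no valid local offset file; consume from the oldest offset
    }
    return Long.parseLong(fileOffset) >= Long.parseLong(oldestOffset) ? fileOffset : oldestOffset;
  }

  public static void main(String[] args) {
    Map<String, String> fileOffsets = new HashMap<>();
    fileOffsets.put("side-input-partition-0", "42");

    Map<String, String> oldestOffsets = new HashMap<>();
    oldestOffsets.put("side-input-partition-0", "10");
    oldestOffsets.put("side-input-partition-1", "7");

    oldestOffsets.forEach((ssp, oldest) ->
        System.out.println(ssp + " starts at " + resolveStartingOffset(fileOffsets.get(ssp), oldest)));
  }
}
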
diff --git a/samza-core/src/main/java/org/apache/samza/storage/TaskSideInputStorageManager.java b/samza-core/src/main/java/org/apache/samza/storage/TaskSideInputStorageManager.java
index e2cfe1d..c93e0b3 100644
--- a/samza-core/src/main/java/org/apache/samza/storage/TaskSideInputStorageManager.java
+++ b/samza-core/src/main/java/org/apache/samza/storage/TaskSideInputStorageManager.java
@@ -20,34 +20,20 @@
 package org.apache.samza.storage;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.samza.Partition;
 import org.apache.samza.SamzaException;
-import org.apache.samza.config.Config;
 import org.apache.samza.container.TaskName;
 import org.apache.samza.job.model.TaskMode;
-import org.apache.samza.storage.kv.Entry;
-import org.apache.samza.storage.kv.KeyValueStore;
-import org.apache.samza.system.IncomingMessageEnvelope;
-import org.apache.samza.system.StreamMetadataCache;
-import org.apache.samza.system.SystemAdmins;
-import org.apache.samza.system.SystemStream;
-import org.apache.samza.system.SystemStreamMetadata;
 import org.apache.samza.system.SystemStreamPartition;
 import org.apache.samza.util.Clock;
 import org.apache.samza.util.FileUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import scala.collection.JavaConverters;
 
 import java.io.File;
-import java.util.Collection;
 import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 import java.util.stream.Collectors;
@@ -62,53 +48,28 @@
   private static final long STORE_DELETE_RETENTION_MS = TimeUnit.DAYS.toMillis(1); // same as changelog delete retention
 
   private final Clock clock;
-  private final Map<String, SideInputsProcessor> storeToProcessor;
   private final Map<String, StorageEngine> stores;
   private final File storeBaseDir;
   private final Map<String, Set<SystemStreamPartition>> storeToSSps;
-  private final Map<SystemStreamPartition, Set<String>> sspsToStores;
-  private final StreamMetadataCache streamMetadataCache;
-  private final SystemAdmins systemAdmins;
   private final TaskName taskName;
   private final TaskMode taskMode;
-  private final Map<SystemStreamPartition, String> lastProcessedOffsets = new ConcurrentHashMap<>();
   private final StorageManagerUtil storageManagerUtil = new StorageManagerUtil();
 
-  private Map<SystemStreamPartition, String> startingOffsets;
+  public TaskSideInputStorageManager(TaskName taskName, TaskMode taskMode, File storeBaseDir, Map<String, StorageEngine> sideInputStores,
+      Map<String, Set<SystemStreamPartition>> storesToSSPs, Clock clock) {
+    validateStoreConfiguration(sideInputStores);
 
-  public TaskSideInputStorageManager(
-      TaskName taskName,
-      TaskMode taskMode,
-      StreamMetadataCache streamMetadataCache,
-      File storeBaseDir,
-      Map<String, StorageEngine> sideInputStores,
-      Map<String, SideInputsProcessor> storesToProcessor,
-      Map<String, Set<SystemStreamPartition>> storesToSSPs,
-      SystemAdmins systemAdmins,
-      Config config,
-      Clock clock) {
     this.clock = clock;
     this.stores = sideInputStores;
     this.storeBaseDir = storeBaseDir;
     this.storeToSSps = storesToSSPs;
-    this.streamMetadataCache = streamMetadataCache;
-    this.systemAdmins = systemAdmins;
     this.taskName = taskName;
     this.taskMode = taskMode;
-    this.storeToProcessor = storesToProcessor;
+  }
 
-    validateStoreConfiguration();
-
-    this.sspsToStores = new HashMap<>();
-    storesToSSPs.forEach((store, ssps) -> {
-        for (SystemStreamPartition ssp: ssps) {
-          sspsToStores.computeIfAbsent(ssp, key -> new HashSet<>());
-          sspsToStores.computeIfPresent(ssp, (key, value) -> {
-              value.add(store);
-              return value;
-            });
-        }
-      });
+  // Get the taskName associated with this instance.
+  public TaskName getTaskName() {
+    return this.taskName;
   }
 
   /**
@@ -117,38 +78,32 @@
   public void init() {
     LOG.info("Initializing side input stores.");
 
-    Map<SystemStreamPartition, String> fileOffsets = getFileOffsets();
-    LOG.info("File offsets for the task {}: ", taskName, fileOffsets);
-
-    Map<SystemStreamPartition, String> oldestOffsets = getOldestOffsets();
-    LOG.info("Oldest offsets for the task {}: ", taskName, fileOffsets);
-
-    startingOffsets = getStartingOffsets(fileOffsets, oldestOffsets);
-    LOG.info("Starting offsets for the task {}: {}", taskName, startingOffsets);
-
-    lastProcessedOffsets.putAll(fileOffsets);
-    LOG.info("Last processed offsets for the task {}: {}", taskName, lastProcessedOffsets);
-
     initializeStoreDirectories();
   }
 
   /**
    * Flushes the contents of the underlying store and writes the offset file to disk.
    * Synchronized inorder to be exclusive with process()
+   *
+   * @param lastProcessedOffsets The last processed offsets for each SSP. These will be used when writing offset files
+   *                             for each store.
    */
-  public synchronized void flush() {
+  public void flush(Map<SystemStreamPartition, String> lastProcessedOffsets) {
     LOG.info("Flushing the side input stores.");
     stores.values().forEach(StorageEngine::flush);
-    writeOffsetFiles();
+    writeFileOffsets(lastProcessedOffsets);
   }
 
   /**
    * Stops the storage engines for all the stores and writes the offset file to disk.
+   *
+   * @param lastProcessedOffsets The last processed offsets for each SSP. These will be used when writing offset files
+   *                             for each store.
    */
-  public void stop() {
+  public void stop(Map<SystemStreamPartition, String> lastProcessedOffsets) {
     LOG.info("Stopping the side input stores.");
     stores.values().forEach(StorageEngine::stop);
-    writeOffsetFiles();
+    writeFileOffsets(lastProcessedOffsets);
   }
 
   /**
@@ -162,77 +117,6 @@
   }
 
   /**
-   * Gets the starting offset for the given side input {@link SystemStreamPartition}.
-   *
-   * Note: The method doesn't respect {@link org.apache.samza.config.StreamConfig#CONSUMER_OFFSET_DEFAULT} and
-   * {@link org.apache.samza.config.StreamConfig#CONSUMER_RESET_OFFSET} configurations. It will use the local offset
-   * file if it is valid, else it will fall back to oldest offset in the stream.
-   *
-   * @param ssp side input system stream partition to get the starting offset for
-   * @return the starting offset
-   */
-  public String getStartingOffset(SystemStreamPartition ssp) {
-    return startingOffsets.get(ssp);
-  }
-
-  // Get the taskName associated with this instance.
-  public TaskName getTaskName() {
-    return this.taskName;
-  }
-
-  /**
-   * Gets the last processed offset for the given side input {@link SystemStreamPartition}.
-   *
-   * @param ssp side input system stream partition to get the last processed offset for
-   * @return the last processed offset
-   */
-  public String getLastProcessedOffset(SystemStreamPartition ssp) {
-    return lastProcessedOffsets.get(ssp);
-  }
-
-  /**
-   * For unit testing only
-   */
-  @VisibleForTesting
-  void updateLastProcessedOffset(SystemStreamPartition ssp, String offset) {
-    lastProcessedOffsets.put(ssp, offset);
-  }
-
-  /**
-   * Processes the incoming side input message envelope and updates the last processed offset for its SSP.
-   * Synchronized inorder to be exclusive with flush().
-   *
-   * @param message incoming message to be processed
-   */
-  public synchronized void process(IncomingMessageEnvelope message) {
-    SystemStreamPartition ssp = message.getSystemStreamPartition();
-    Set<String> storeNames = sspsToStores.get(ssp);
-
-    for (String storeName : storeNames) {
-      SideInputsProcessor sideInputsProcessor = storeToProcessor.get(storeName);
-
-      KeyValueStore keyValueStore = (KeyValueStore) stores.get(storeName);
-      Collection<Entry<?, ?>> entriesToBeWritten = sideInputsProcessor.process(message, keyValueStore);
-
-      // Iterate over the list to be written.
-      // TODO: SAMZA-2255: Optimize value writes in TaskSideInputStorageManager
-      for (Entry entry : entriesToBeWritten) {
-        // If the key is null we ignore, if the value is null, we issue a delete, else we issue a put
-        if (entry.getKey() != null) {
-          if (entry.getValue() != null) {
-            keyValueStore.put(entry.getKey(), entry.getValue());
-          } else {
-            keyValueStore.delete(entry.getKey());
-          }
-        }
-      }
-    }
-
-    // update the last processed offset
-    lastProcessedOffsets.put(ssp, message.getOffset());
-  }
-
-  /**
    * Initializes the store directories for all the stores:
    *  1. Cleans up the directories for invalid stores.
    *  2. Ensures that the directories exist.
@@ -241,41 +125,42 @@
     LOG.info("Initializing side input store directories.");
 
     stores.keySet().forEach(storeName -> {
-        File storeLocation = getStoreLocation(storeName);
-        String storePath = storeLocation.toPath().toString();
-        if (!isValidSideInputStore(storeName, storeLocation)) {
-          LOG.info("Cleaning up the store directory at {} for {}", storePath, storeName);
-          new FileUtil().rm(storeLocation);
-        }
+      File storeLocation = getStoreLocation(storeName);
+      String storePath = storeLocation.toPath().toString();
+      if (!isValidSideInputStore(storeName, storeLocation)) {
+        LOG.info("Cleaning up the store directory at {} for {}", storePath, storeName);
+        new FileUtil().rm(storeLocation);
+      }
 
-        if (isPersistedStore(storeName) && !storeLocation.exists()) {
-          LOG.info("Creating {} as the store directory for the side input store {}", storePath, storeName);
-          storeLocation.mkdirs();
-        }
-      });
+      if (isPersistedStore(storeName) && !storeLocation.exists()) {
+        LOG.info("Creating {} as the store directory for the side input store {}", storePath, storeName);
+        storeLocation.mkdirs();
+      }
+    });
   }
 
   /**
    * Writes the offset files for all side input stores one by one. There is one offset file per store.
    * Its contents are a JSON encoded mapping from each side input SSP to its last processed offset, and a checksum.
+   *
+   * @param lastProcessedOffsets The offset per SSP to write
    */
-  @VisibleForTesting
-  void writeOffsetFiles() {
+  public void writeFileOffsets(Map<SystemStreamPartition, String> lastProcessedOffsets) {
     storeToSSps.entrySet().stream()
         .filter(entry -> isPersistedStore(entry.getKey())) // filter out in-memory side input stores
         .forEach((entry) -> {
-            String storeName = entry.getKey();
-            Map<SystemStreamPartition, String> offsets = entry.getValue().stream()
-              .filter(lastProcessedOffsets::containsKey)
-              .collect(Collectors.toMap(Function.identity(), lastProcessedOffsets::get));
+          String storeName = entry.getKey();
+          Map<SystemStreamPartition, String> offsets = entry.getValue().stream()
+            .filter(lastProcessedOffsets::containsKey)
+            .collect(Collectors.toMap(Function.identity(), lastProcessedOffsets::get));
 
-            try {
-              File taskStoreDir = storageManagerUtil.getTaskStoreDir(storeBaseDir, storeName, taskName, taskMode);
-              storageManagerUtil.writeOffsetFile(taskStoreDir, offsets, true);
-            } catch (Exception e) {
-              throw new SamzaException("Failed to write offset file for side input store: " + storeName, e);
-            }
-          });
+          try {
+            File taskStoreDir = storageManagerUtil.getTaskStoreDir(storeBaseDir, storeName, taskName, taskMode);
+            storageManagerUtil.writeOffsetFile(taskStoreDir, offsets, true);
+          } catch (Exception e) {
+            throw new SamzaException("Failed to write offset file for side input store: " + storeName, e);
+          }
+        });
   }
 
   /**
@@ -283,27 +168,25 @@
    *
    * @return a {@link Map} of {@link SystemStreamPartition} to offset in the offset files.
    */
-  @SuppressWarnings("unchecked")
-  @VisibleForTesting
-  Map<SystemStreamPartition, String> getFileOffsets() {
+  public Map<SystemStreamPartition, String> getFileOffsets() {
     LOG.info("Loading initial offsets from the file for side input stores.");
     Map<SystemStreamPartition, String> fileOffsets = new HashMap<>();
 
     stores.keySet().forEach(storeName -> {
-        LOG.debug("Reading local offsets for store: {}", storeName);
+      LOG.debug("Reading local offsets for store: {}", storeName);
 
-        File storeLocation = getStoreLocation(storeName);
-        if (isValidSideInputStore(storeName, storeLocation)) {
-          try {
+      File storeLocation = getStoreLocation(storeName);
+      if (isValidSideInputStore(storeName, storeLocation)) {
+        try {
 
-            Map<SystemStreamPartition, String> offsets =
-                storageManagerUtil.readOffsetFile(storeLocation, storeToSSps.get(storeName), true);
-            fileOffsets.putAll(offsets);
-          } catch (Exception e) {
-            LOG.warn("Failed to load the offset file for side input store:" + storeName, e);
-          }
+          Map<SystemStreamPartition, String> offsets =
+              storageManagerUtil.readOffsetFile(storeLocation, storeToSSps.get(storeName), true);
+          fileOffsets.putAll(offsets);
+        } catch (Exception e) {
+          LOG.warn("Failed to load the offset file for side input store:" + storeName, e);
         }
-      });
+      }
+    });
 
     return fileOffsets;
   }
@@ -313,71 +196,6 @@
     return storageManagerUtil.getTaskStoreDir(storeBaseDir, storeName, taskName, taskMode);
   }
 
-  /**
-   * Gets the starting offsets for the {@link SystemStreamPartition}s belonging to all the side input stores.
-   * If the local file offset is available and is greater than the oldest available offset from source, uses it,
-   * else falls back to oldest offset in the source.
-   *
-   * @param fileOffsets offsets from the local offset file
-   * @param oldestOffsets oldest offsets from the source
-   * @return a {@link Map} of {@link SystemStreamPartition} to offset
-   */
-  @VisibleForTesting
-  Map<SystemStreamPartition, String> getStartingOffsets(
-      Map<SystemStreamPartition, String> fileOffsets, Map<SystemStreamPartition, String> oldestOffsets) {
-    Map<SystemStreamPartition, String> startingOffsets = new HashMap<>();
-
-    sspsToStores.keySet().forEach(ssp -> {
-        String fileOffset = fileOffsets.get(ssp);
-        String oldestOffset = oldestOffsets.get(ssp);
-
-        startingOffsets.put(ssp,
-          storageManagerUtil.getStartingOffset(
-            ssp, systemAdmins.getSystemAdmin(ssp.getSystem()), fileOffset, oldestOffset));
-      });
-
-    return startingOffsets;
-  }
-
-  /**
-   * Gets the oldest offset for the {@link SystemStreamPartition}s associated with all the store side inputs.
-   *   1. Groups the list of the SSPs based on system stream
-   *   2. Fetches the {@link SystemStreamMetadata} from {@link StreamMetadataCache}
-   *   3. Fetches the partition metadata for each system stream and fetch the corresponding partition metadata
-   *      and populates the oldest offset for SSPs belonging to the system stream.
-   *
-   * @return a {@link Map} of {@link SystemStreamPartition} to their oldest offset.
-   */
-  @VisibleForTesting
-  Map<SystemStreamPartition, String> getOldestOffsets() {
-    Map<SystemStreamPartition, String> oldestOffsets = new HashMap<>();
-
-    // Step 1
-    Map<SystemStream, List<SystemStreamPartition>> systemStreamToSsp = sspsToStores.keySet().stream()
-        .collect(Collectors.groupingBy(SystemStreamPartition::getSystemStream));
-
-    // Step 2
-    Map<SystemStream, SystemStreamMetadata> metadata = JavaConverters.mapAsJavaMapConverter(
-        streamMetadataCache.getStreamMetadata(
-            JavaConverters.asScalaSetConverter(systemStreamToSsp.keySet()).asScala().toSet(), false)).asJava();
-
-    // Step 3
-    metadata.forEach((systemStream, systemStreamMetadata) -> {
-        // get the partition metadata for each system stream
-        Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> partitionMetadata =
-          systemStreamMetadata.getSystemStreamPartitionMetadata();
-
-        // For SSPs belonging to the system stream, use the partition metadata to get the oldest offset
-        Map<SystemStreamPartition, String> offsets = systemStreamToSsp.get(systemStream).stream()
-          .collect(
-              Collectors.toMap(Function.identity(), ssp -> partitionMetadata.get(ssp.getPartition()).getOldestOffset()));
-
-        oldestOffsets.putAll(offsets);
-      });
-
-    return oldestOffsets;
-  }
-
   private boolean isValidSideInputStore(String storeName, File storeLocation) {
     return isPersistedStore(storeName)
         && !storageManagerUtil.isStaleStore(storeLocation, STORE_DELETE_RETENTION_MS, clock.currentTimeMillis(), true)
@@ -391,17 +209,12 @@
         .orElse(false);
   }
 
-  private void validateStoreConfiguration() {
+  private void validateStoreConfiguration(Map<String, StorageEngine> stores) {
     stores.forEach((storeName, storageEngine) -> {
-        if (!storeToProcessor.containsKey(storeName)) {
-          throw new SamzaException(
-              String.format("Side inputs processor missing for store: %s.", storeName));
-        }
-
-        if (storageEngine.getStoreProperties().isLoggedStore()) {
-          throw new SamzaException(
-              String.format("Cannot configure both side inputs and a changelog for store: %s.", storeName));
-        }
-      });
+      if (storageEngine.getStoreProperties().isLoggedStore()) {
+        throw new SamzaException(
+            String.format("Cannot configure both side inputs and a changelog for store: %s.", storeName));
+      }
+    });
   }
 }
\ No newline at end of file
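
For context on the side input starting-offset resolution this class delegates to StorageManagerUtil#getStartingOffset: the rule is to use the locally persisted file offset when it is still ahead of the oldest offset retained by the source, and to fall back to the oldest offset otherwise. The sketch below illustrates that rule in isolation with numeric string offsets and a plain comparison; it is an illustration under those assumptions, not the actual Samza implementation.

import java.util.HashMap;
import java.util.Map;

// Standalone illustration of the starting-offset resolution rule for side input SSPs.
// Offsets are assumed to be numeric strings here; the real code delegates the comparison to the system admin.
public class StartingOffsetSketch {
  static String resolveStartingOffset(String fileOffset, String oldestOffset) {
    if (fileOffset == null) {
      return oldestOffset;                          // no local offset file: start from the oldest offset
    }
    if (oldestOffset == null) {
      return fileOffset;                            // empty source: keep whatever was recorded locally
    }
    long file = Long.parseLong(fileOffset);
    long oldest = Long.parseLong(oldestOffset);
    return file > oldest ? fileOffset : oldestOffset;  // fall back if the local offset is no longer retained
  }

  public static void main(String[] args) {
    Map<String, String[]> cases = new HashMap<>();
    cases.put("local offset still retained", new String[] {"42", "10"});
    cases.put("local offset trimmed away", new String[] {"5", "10"});
    cases.put("no local offset file", new String[] {null, "10"});
    cases.forEach((name, pair) ->
        System.out.println(name + " -> " + resolveStartingOffset(pair[0], pair[1])));
  }
}
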
diff --git a/samza-core/src/main/java/org/apache/samza/storage/TransactionalStateTaskRestoreManager.java b/samza-core/src/main/java/org/apache/samza/storage/TransactionalStateTaskRestoreManager.java
index c578d9a..e4633b7 100644
--- a/samza-core/src/main/java/org/apache/samza/storage/TransactionalStateTaskRestoreManager.java
+++ b/samza-core/src/main/java/org/apache/samza/storage/TransactionalStateTaskRestoreManager.java
@@ -144,10 +144,10 @@
   public void stopPersistentStores() {
     TaskName taskName = taskModel.getTaskName();
     storeEngines.forEach((storeName, storeEngine) -> {
-        if (storeEngine.getStoreProperties().isPersistedToDisk())
-          storeEngine.stop();
-        LOG.info("Stopped persistent store: {} in task: {}", storeName, taskName);
-      });
+      if (storeEngine.getStoreProperties().isPersistedToDisk()) {
+        storeEngine.stop();
+        LOG.info("Stopped persistent store: {} in task: {}", storeName, taskName);
+      }
+    });
   }
 
   /**
@@ -208,211 +208,211 @@
     Map<String, RestoreOffsets> storesToRestore = new HashMap<>();
 
     storeEngines.forEach((storeName, storageEngine) -> {
-        // do nothing if store is non persistent and not logged (e.g. in memory cache only)
-        if (!storageEngine.getStoreProperties().isPersistedToDisk() &&
+      // do nothing if store is non persistent and not logged (e.g. in memory cache only)
+      if (!storageEngine.getStoreProperties().isPersistedToDisk() &&
+        !storageEngine.getStoreProperties().isLoggedStore()) {
+        return;
+      }
+
+      // persistent but non-logged stores are always deleted
+      if (storageEngine.getStoreProperties().isPersistedToDisk() &&
           !storageEngine.getStoreProperties().isLoggedStore()) {
-          return;
-        }
+        File currentDir = storageManagerUtil.getTaskStoreDir(
+            nonLoggedStoreBaseDirectory, storeName, taskName, taskMode);
+        LOG.info("Marking current directory: {} for store: {} in task: {} for deletion since it is not a logged store.",
+            currentDir, storeName, taskName);
+        storeDirsToDelete.put(storeName, currentDir);
+        // persistent but non-logged stores should not have checkpoint dirs
+        return;
+      }
 
-        // persistent but non-logged stores are always deleted
-        if (storageEngine.getStoreProperties().isPersistedToDisk() &&
-            !storageEngine.getStoreProperties().isLoggedStore()) {
-          File currentDir = storageManagerUtil.getTaskStoreDir(
-              nonLoggedStoreBaseDirectory, storeName, taskName, taskMode);
-          LOG.info("Marking current directory: {} for store: {} in task: {} for deletion since it is not a logged store.",
-              currentDir, storeName, taskName);
-          storeDirsToDelete.put(storeName, currentDir);
-          // persistent but non-logged stores should not have checkpoint dirs
-          return;
-        }
+      // get the oldest and newest current changelog SSP offsets as well as the checkpointed changelog SSP offset
+      SystemStream changelog = storeChangelogs.get(storeName);
+      SystemStreamPartition changelogSSP = new SystemStreamPartition(changelog, taskModel.getChangelogPartition());
+      SystemAdmin admin = systemAdmins.getSystemAdmin(changelogSSP.getSystem());
+      SystemStreamPartitionMetadata changelogSSPMetadata = currentChangelogOffsets.get(changelogSSP);
+      String oldestOffset = changelogSSPMetadata.getOldestOffset();
+      String newestOffset = changelogSSPMetadata.getNewestOffset();
 
-        // get the oldest and newest current changelog SSP offsets as well as the checkpointed changelog SSP offset
-        SystemStream changelog = storeChangelogs.get(storeName);
-        SystemStreamPartition changelogSSP = new SystemStreamPartition(changelog, taskModel.getChangelogPartition());
-        SystemAdmin admin = systemAdmins.getSystemAdmin(changelogSSP.getSystem());
-        SystemStreamPartitionMetadata changelogSSPMetadata = currentChangelogOffsets.get(changelogSSP);
-        String oldestOffset = changelogSSPMetadata.getOldestOffset();
-        String newestOffset = changelogSSPMetadata.getNewestOffset();
+      String checkpointMessage = checkpointedChangelogOffsets.get(changelogSSP);
+      String checkpointedOffset = null;  // can be null if no message, or message has null offset
+      long timeSinceLastCheckpointInMs = Long.MAX_VALUE;
+      if (StringUtils.isNotBlank(checkpointMessage)) {
+        CheckpointedChangelogOffset checkpointedChangelogOffset = CheckpointedChangelogOffset.fromString(checkpointMessage);
+        checkpointedOffset = checkpointedChangelogOffset.getOffset();
+        timeSinceLastCheckpointInMs = System.currentTimeMillis() -
+            checkpointedChangelogOffset.getCheckpointId().getMillis();
+      }
 
-        String checkpointMessage = checkpointedChangelogOffsets.get(changelogSSP);
-        String checkpointedOffset = null;  // can be null if no message, or message has null offset
-        long timeSinceLastCheckpointInMs = Long.MAX_VALUE;
-        if (StringUtils.isNotBlank(checkpointMessage)) {
-          CheckpointedChangelogOffset checkpointedChangelogOffset = CheckpointedChangelogOffset.fromString(checkpointMessage);
-          checkpointedOffset = checkpointedChangelogOffset.getOffset();
-          timeSinceLastCheckpointInMs = System.currentTimeMillis() -
-              checkpointedChangelogOffset.getCheckpointId().getMillis();
-        }
+      // if the clean.store.start config is set, delete current and checkpoint dirs, restore from oldest offset to checkpointed
+      if (storageEngine.getStoreProperties().isPersistedToDisk() && new StorageConfig(
+        config).getCleanLoggedStoreDirsOnStart(storeName)) {
+        File currentDir = storageManagerUtil.getTaskStoreDir(loggedStoreBaseDirectory, storeName, taskName, taskMode);
+        LOG.info("Marking current directory: {} for store: {} in task: {} for deletion due to clean.on.container.start config.",
+            currentDir, storeName, taskName);
+        storeDirsToDelete.put(storeName, currentDir);
 
-        // if the clean.store.start config is set, delete current and checkpoint dirs, restore from oldest offset to checkpointed
-        if (storageEngine.getStoreProperties().isPersistedToDisk() && new StorageConfig(
-          config).getCleanLoggedStoreDirsOnStart(storeName)) {
-          File currentDir = storageManagerUtil.getTaskStoreDir(loggedStoreBaseDirectory, storeName, taskName, taskMode);
-          LOG.info("Marking current directory: {} for store: {} in task: {} for deletion due to clean.on.container.start config.",
-              currentDir, storeName, taskName);
-          storeDirsToDelete.put(storeName, currentDir);
+        storageManagerUtil.getTaskStoreCheckpointDirs(loggedStoreBaseDirectory, storeName, taskName, taskMode)
+            .forEach(checkpointDir -> {
+              LOG.info("Marking checkpoint directory: {} for store: {} in task: {} for deletion due to clean.on.container.start config.",
+                  checkpointDir, storeName, taskName);
+              storeDirsToDelete.put(storeName, checkpointDir);
+            });
 
-          storageManagerUtil.getTaskStoreCheckpointDirs(loggedStoreBaseDirectory, storeName, taskName, taskMode)
-              .forEach(checkpointDir -> {
-                  LOG.info("Marking checkpoint directory: {} for store: {} in task: {} for deletion due to clean.on.container.start config.",
-                      checkpointDir, storeName, taskName);
-                  storeDirsToDelete.put(storeName, checkpointDir);
-                });
+        LOG.info("Marking restore offsets for store: {} in task: {} to {}, {} ", storeName, taskName, oldestOffset, checkpointedOffset);
+        storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, checkpointedOffset));
+        return;
+      }
 
-          LOG.info("Marking restore offsets for store: {} in task: {} to {}, {} ", storeName, taskName, oldestOffset, checkpointedOffset);
-          storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, checkpointedOffset));
-          return;
-        }
+      Optional<File> currentDirOptional;
+      Optional<List<File>> checkpointDirsOptional;
 
-        Optional<File> currentDirOptional;
-        Optional<List<File>> checkpointDirsOptional;
+      if (!storageEngine.getStoreProperties().isPersistedToDisk()) {
+        currentDirOptional = Optional.empty();
+        checkpointDirsOptional = Optional.empty();
+      } else {
+        currentDirOptional = Optional.of(storageManagerUtil.getTaskStoreDir(
+            loggedStoreBaseDirectory, storeName, taskName, taskMode));
+        checkpointDirsOptional = Optional.of(storageManagerUtil.getTaskStoreCheckpointDirs(
+            loggedStoreBaseDirectory, storeName, taskName, taskMode));
+      }
 
-        if (!storageEngine.getStoreProperties().isPersistedToDisk()) {
-          currentDirOptional = Optional.empty();
-          checkpointDirsOptional = Optional.empty();
-        } else {
-          currentDirOptional = Optional.of(storageManagerUtil.getTaskStoreDir(
-              loggedStoreBaseDirectory, storeName, taskName, taskMode));
-          checkpointDirsOptional = Optional.of(storageManagerUtil.getTaskStoreCheckpointDirs(
-              loggedStoreBaseDirectory, storeName, taskName, taskMode));
-        }
+      LOG.info("For store: {} in task: {} got current dir: {}, checkpoint dirs: {}, checkpointed changelog offset: {}",
+          storeName, taskName, currentDirOptional, checkpointDirsOptional, checkpointedOffset);
 
-        LOG.info("For store: {} in task: {} got current dir: {}, checkpoint dirs: {}, checkpointed changelog offset: {}",
-            storeName, taskName, currentDirOptional, checkpointDirsOptional, checkpointedOffset);
-
-        currentDirOptional.ifPresent(currentDir -> {
-            LOG.info("Marking current directory: {} for store: {} in task: {} for deletion.",
-                currentDir, storeName, taskName);
-            storeDirsToDelete.put(storeName, currentDir);
-          });
-
-        if (checkpointedOffset == null && oldestOffset != null) {
-          // this can mean that either this is the initial migration for this feature and there are no previously
-          // checkpointed changelog offsets, or that this is a new store or changelog topic after the initial migration.
-
-          // if this is the first time migration, it might be desirable to retain existing data.
-          // if this is new store or topic, it is possible that the container previously died after writing some data to
-          // the changelog but before a commit, so it is desirable to delete the store, not restore anything and
-          // trim the changelog
-
-          // since we can't tell the difference b/w the two scenarios by just looking at the store and changelogs,
-          // we'll request users to indicate whether to retain existing data using a config flag. this flag should only
-          // be set during migrations, and turned off after the first successful commit of the new container (i.e. next
-          // deploy). for simplicity, we'll always delete the local store, and restore from changelog if necessary.
-
-          // the former scenario should not be common. the recommended way to opt-in to the transactional state feature
-          // is to first upgrade to the latest samza version but keep the transactional state restore config off.
-          // this will create the store checkpoint directories and write the changelog offset to the checkpoint, but
-          // will not use them during restore. once this is done (i.e. at least one commit after upgrade), the
-          // transactional state restore feature can be turned on on subsequent deploys. this code path exists as a
-          // fail-safe against clearing changelogs in case users do not follow upgrade instructions and enable the
-          // feature directly.
-          checkpointDirsOptional.ifPresent(checkpointDirs ->
-              checkpointDirs.forEach(checkpointDir -> {
-                  LOG.info("Marking checkpoint directory: {} for store: {} in task: {} for deletion since checkpointed " +
-                          "offset is null and oldest offset: {} is not.",
-                      checkpointDir, storeName, taskName, oldestOffset);
-                  storeDirsToDelete.put(storeName, checkpointDir);
-                }));
-
-          if (new TaskConfig(config).getTransactionalStateRetainExistingState()) {
-            // mark for restore from (oldest, newest) to recreate local state.
-            LOG.warn("Checkpointed offset for store: {} in task: {} is null. Since retain existing state is true, " +
-                "local state will be fully restored from current changelog contents. " +
-                "There is no transactional local state guarantee.", storeName, taskName);
-            storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, newestOffset));
-          } else {
-            LOG.warn("Checkpointed offset for store: {} in task: {} is null. Since retain existing state is false, " +
-                "any local state and changelog topic contents will be deleted.", storeName, taskName);
-            // mark for restore from (oldest, null) to trim entire changelog.
-            storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, null));
-          }
-        } else if (// check if the checkpointed offset is out of range of current oldest and newest offsets
-            admin.offsetComparator(oldestOffset, checkpointedOffset) > 0 ||
-            admin.offsetComparator(checkpointedOffset, newestOffset) > 0) {
-          // checkpointed offset is out of range. this could mean that this is a TTL topic and the checkpointed
-          // offset was TTLd, or that the changelog topic was manually deleted and then recreated.
-          // we cannot guarantee transactional state for TTL stores, so delete everything and do a full restore
-          // for local store. if the topic was deleted and recreated, this will have the side effect of
-          // clearing the store as well.
-          LOG.warn("Checkpointed offset: {} for store: {} in task: {} is out of range of oldest: {} or newest: {} offset." +
-                  "Deleting existing store and fully restoring from changelog topic from oldest to newest offset. If the topic " +
-                  "has time-based retention, there is no transactional local state guarantees. If the topic was changed," +
-                  "local state will be cleaned up and fully restored to match the new topic contents.",
-              checkpointedOffset, storeName, taskName, oldestOffset, newestOffset);
-          checkpointDirsOptional.ifPresent(checkpointDirs ->
-              checkpointDirs.forEach(checkpointDir -> storeDirsToDelete.put(storeName, checkpointDir)));
-          storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, newestOffset));
-        } else { // happy path. checkpointed offset is in range of current oldest and newest offsets
-          if (!checkpointDirsOptional.isPresent()) { // non-persistent logged store
-            LOG.info("Did not find any checkpoint directories for logged (maybe non-persistent) store: {}. Local state " +
-                "will be fully restored from current changelog contents.", storeName);
-            storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, checkpointedOffset));
-          } else { // persistent logged store
-            String targetOffset;
-
-            // check checkpoint time against min.compaction.lag.ms. if older, restore from checkpointed offset to newest
-            // with no trim. be conservative. allow 10% safety margin to avoid deletions when the downtime is close
-            // to min.compaction.lag.ms
-            long minCompactionLagMs = new StorageConfig(config).getChangelogMinCompactionLagMs(storeName);
-            if (timeSinceLastCheckpointInMs > .9 * minCompactionLagMs) {
-              LOG.warn("Checkpointed offset for store: {} in task: {} is: {}. It is in range of oldest: {} and " +
-                  "newest: {} changelog offset. However, time since last checkpoint is: {}, which is greater than " +
-                  "0.9 * min.compaction.lag.ms: {} for the changelog topic. Since there is a chance that" +
-                  "the changelog topic has been compacted, restoring store to the end of the current changelog contents." +
-                  "There is no transactional local state guarantee.", storeName, taskName, checkpointedOffset,
-                  oldestOffset, newestOffset, timeSinceLastCheckpointInMs, minCompactionLagMs);
-              targetOffset = newestOffset;
-            } else {
-              targetOffset = checkpointedOffset;
-            }
-
-            // if there exists a valid store checkpoint directory with oldest offset <= local offset <= target offset,
-            // retain it and restore the delta. delete all other checkpoint directories for the store. if more than one such
-            // checkpoint directory exists, retain the one with the highest local offset and delete the rest.
-            boolean hasValidCheckpointDir = false;
-            for (File checkpointDir: checkpointDirsOptional.get()) {
-              if (storageManagerUtil.isLoggedStoreValid(
-                  storeName, checkpointDir, config, storeChangelogs, taskModel, clock, storeEngines)) {
-                String localOffset = storageManagerUtil.readOffsetFile(
-                    checkpointDir, Collections.singleton(changelogSSP), false).get(changelogSSP);
-                LOG.info("Read local offset: {} for store: {} checkpoint dir: {} in task: {}", localOffset, storeName,
-                    checkpointDir, taskName);
-
-                if (admin.offsetComparator(localOffset, oldestOffset) >= 0 &&
-                    admin.offsetComparator(localOffset, targetOffset) <= 0 &&
-                    (storesToRestore.get(storeName) == null ||
-                        admin.offsetComparator(localOffset, storesToRestore.get(storeName).startingOffset) > 0)) {
-                  hasValidCheckpointDir = true;
-                  LOG.info("Temporarily marking checkpoint dir: {} for store: {} in task: {} for retention. " +
-                      "May be overridden later.", checkpointDir, storeName, taskName);
-                  storeDirToRetain.put(storeName, checkpointDir);
-                  // mark for restore even if local == checkpointed, so that the changelog gets trimmed.
-                  LOG.info("Temporarily marking store: {} in task: {} for restore from beginning offset: {} to " +
-                      "ending offset: {}. May be overridden later", storeName, taskName, localOffset, targetOffset);
-                  storesToRestore.put(storeName, new RestoreOffsets(localOffset, targetOffset));
-                }
-              }
-            }
-
-            // delete all non-retained checkpoint directories
-            for (File checkpointDir: checkpointDirsOptional.get()) {
-              if (storeDirToRetain.get(storeName) == null ||
-                  !storeDirToRetain.get(storeName).equals(checkpointDir)) {
-                LOG.info("Marking checkpoint directory: {} for store: {} in task: {} for deletion since it is not " +
-                    "marked for retention.", checkpointDir, storeName, taskName);
-                storeDirsToDelete.put(storeName, checkpointDir);
-              }
-            }
-
-            // if the store had not valid checkpoint dirs to retain, restore from changelog
-            if (!hasValidCheckpointDir) {
-              storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, targetOffset));
-            }
-          }
-        }
+      currentDirOptional.ifPresent(currentDir -> {
+        LOG.info("Marking current directory: {} for store: {} in task: {} for deletion.",
+            currentDir, storeName, taskName);
+        storeDirsToDelete.put(storeName, currentDir);
       });
 
+      if (checkpointedOffset == null && oldestOffset != null) {
+        // this can mean that either this is the initial migration for this feature and there are no previously
+        // checkpointed changelog offsets, or that this is a new store or changelog topic after the initial migration.
+
+        // if this is the first time migration, it might be desirable to retain existing data.
+      // if this is a new store or topic, it is possible that the container previously died after writing some data to
+      // the changelog but before a commit, so it is desirable to delete the store, not restore anything, and
+      // trim the changelog.
+
+        // since we can't tell the difference b/w the two scenarios by just looking at the store and changelogs,
+        // we'll request users to indicate whether to retain existing data using a config flag. this flag should only
+        // be set during migrations, and turned off after the first successful commit of the new container (i.e. next
+        // deploy). for simplicity, we'll always delete the local store, and restore from changelog if necessary.
+
+        // the former scenario should not be common. the recommended way to opt-in to the transactional state feature
+        // is to first upgrade to the latest samza version but keep the transactional state restore config off.
+        // this will create the store checkpoint directories and write the changelog offset to the checkpoint, but
+        // will not use them during restore. once this is done (i.e. at least one commit after upgrade), the
+        // transactional state restore feature can be turned on on subsequent deploys. this code path exists as a
+        // fail-safe against clearing changelogs in case users do not follow upgrade instructions and enable the
+        // feature directly.
+        checkpointDirsOptional.ifPresent(checkpointDirs ->
+            checkpointDirs.forEach(checkpointDir -> {
+              LOG.info("Marking checkpoint directory: {} for store: {} in task: {} for deletion since checkpointed " +
+                      "offset is null and oldest offset: {} is not.",
+                  checkpointDir, storeName, taskName, oldestOffset);
+              storeDirsToDelete.put(storeName, checkpointDir);
+            }));
+
+        if (new TaskConfig(config).getTransactionalStateRetainExistingState()) {
+          // mark for restore from (oldest, newest) to recreate local state.
+          LOG.warn("Checkpointed offset for store: {} in task: {} is null. Since retain existing state is true, " +
+              "local state will be fully restored from current changelog contents. " +
+              "There is no transactional local state guarantee.", storeName, taskName);
+          storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, newestOffset));
+        } else {
+          LOG.warn("Checkpointed offset for store: {} in task: {} is null. Since retain existing state is false, " +
+              "any local state and changelog topic contents will be deleted.", storeName, taskName);
+          // mark for restore from (oldest, null) to trim entire changelog.
+          storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, null));
+        }
+      } else if (// check if the checkpointed offset is out of range of current oldest and newest offsets
+          admin.offsetComparator(oldestOffset, checkpointedOffset) > 0 ||
+          admin.offsetComparator(checkpointedOffset, newestOffset) > 0) {
+        // checkpointed offset is out of range. this could mean that this is a TTL topic and the checkpointed
+        // offset was TTLd, or that the changelog topic was manually deleted and then recreated.
+        // we cannot guarantee transactional state for TTL stores, so delete everything and do a full restore
+        // for local store. if the topic was deleted and recreated, this will have the side effect of
+        // clearing the store as well.
+        LOG.warn("Checkpointed offset: {} for store: {} in task: {} is out of range of oldest: {} or newest: {} offset." +
+                "Deleting existing store and fully restoring from changelog topic from oldest to newest offset. If the topic " +
+                "has time-based retention, there is no transactional local state guarantees. If the topic was changed," +
+                "local state will be cleaned up and fully restored to match the new topic contents.",
+            checkpointedOffset, storeName, taskName, oldestOffset, newestOffset);
+        checkpointDirsOptional.ifPresent(checkpointDirs ->
+            checkpointDirs.forEach(checkpointDir -> storeDirsToDelete.put(storeName, checkpointDir)));
+        storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, newestOffset));
+      } else { // happy path. checkpointed offset is in range of current oldest and newest offsets
+        if (!checkpointDirsOptional.isPresent()) { // non-persistent logged store
+          LOG.info("Did not find any checkpoint directories for logged (maybe non-persistent) store: {}. Local state " +
+              "will be fully restored from current changelog contents.", storeName);
+          storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, checkpointedOffset));
+        } else { // persistent logged store
+          String targetOffset;
+
+          // check checkpoint time against min.compaction.lag.ms. if older, restore from checkpointed offset to newest
+          // with no trim. be conservative. allow 10% safety margin to avoid deletions when the downtime is close
+          // to min.compaction.lag.ms
+          long minCompactionLagMs = new StorageConfig(config).getChangelogMinCompactionLagMs(storeName);
+          if (timeSinceLastCheckpointInMs > .9 * minCompactionLagMs) {
+            LOG.warn("Checkpointed offset for store: {} in task: {} is: {}. It is in range of oldest: {} and " +
+                "newest: {} changelog offset. However, time since last checkpoint is: {}, which is greater than " +
+                "0.9 * min.compaction.lag.ms: {} for the changelog topic. Since there is a chance that" +
+                "the changelog topic has been compacted, restoring store to the end of the current changelog contents." +
+                "There is no transactional local state guarantee.", storeName, taskName, checkpointedOffset,
+                oldestOffset, newestOffset, timeSinceLastCheckpointInMs, minCompactionLagMs);
+            targetOffset = newestOffset;
+          } else {
+            targetOffset = checkpointedOffset;
+          }
+
+          // if there exists a valid store checkpoint directory with oldest offset <= local offset <= target offset,
+          // retain it and restore the delta. delete all other checkpoint directories for the store. if more than one such
+          // checkpoint directory exists, retain the one with the highest local offset and delete the rest.
+          boolean hasValidCheckpointDir = false;
+          for (File checkpointDir: checkpointDirsOptional.get()) {
+            if (storageManagerUtil.isLoggedStoreValid(
+                storeName, checkpointDir, config, storeChangelogs, taskModel, clock, storeEngines)) {
+              String localOffset = storageManagerUtil.readOffsetFile(
+                  checkpointDir, Collections.singleton(changelogSSP), false).get(changelogSSP);
+              LOG.info("Read local offset: {} for store: {} checkpoint dir: {} in task: {}", localOffset, storeName,
+                  checkpointDir, taskName);
+
+              if (admin.offsetComparator(localOffset, oldestOffset) >= 0 &&
+                  admin.offsetComparator(localOffset, targetOffset) <= 0 &&
+                  (storesToRestore.get(storeName) == null ||
+                      admin.offsetComparator(localOffset, storesToRestore.get(storeName).startingOffset) > 0)) {
+                hasValidCheckpointDir = true;
+                LOG.info("Temporarily marking checkpoint dir: {} for store: {} in task: {} for retention. " +
+                    "May be overridden later.", checkpointDir, storeName, taskName);
+                storeDirToRetain.put(storeName, checkpointDir);
+                // mark for restore even if local == checkpointed, so that the changelog gets trimmed.
+                LOG.info("Temporarily marking store: {} in task: {} for restore from beginning offset: {} to " +
+                    "ending offset: {}. May be overridden later", storeName, taskName, localOffset, targetOffset);
+                storesToRestore.put(storeName, new RestoreOffsets(localOffset, targetOffset));
+              }
+            }
+          }
+
+          // delete all non-retained checkpoint directories
+          for (File checkpointDir: checkpointDirsOptional.get()) {
+            if (storeDirToRetain.get(storeName) == null ||
+                !storeDirToRetain.get(storeName).equals(checkpointDir)) {
+              LOG.info("Marking checkpoint directory: {} for store: {} in task: {} for deletion since it is not " +
+                  "marked for retention.", checkpointDir, storeName, taskName);
+              storeDirsToDelete.put(storeName, checkpointDir);
+            }
+          }
+
+          // if the store had no valid checkpoint dirs to retain, restore from the changelog
+          if (!hasValidCheckpointDir) {
+            storesToRestore.put(storeName, new RestoreOffsets(oldestOffset, targetOffset));
+          }
+        }
+      }
+    });
+
     LOG.info("Store directories to be retained in Task: {} are: {}", taskName, storeDirToRetain);
     LOG.info("Store directories to be deleted in Task: {} are: {}", taskName, storeDirsToDelete);
     LOG.info("Stores to be restored in Task: {} are: {}", taskName, storesToRestore);
@@ -447,48 +447,48 @@
 
     // delete all persistent store directories marked for deletion
     storeDirsToDelete.entries().forEach(entry -> {
-        String storeName = entry.getKey();
-        File storeDirToDelete = entry.getValue();
-        LOG.info("Deleting persistent store directory: {} for store: {} in task: {}",
-            storeDirToDelete, storeName, taskName);
-        fileUtil.rm(storeDirToDelete);
-      });
+      String storeName = entry.getKey();
+      File storeDirToDelete = entry.getValue();
+      LOG.info("Deleting persistent store directory: {} for store: {} in task: {}",
+          storeDirToDelete, storeName, taskName);
+      fileUtil.rm(storeDirToDelete);
+    });
 
     // rename all retained persistent logged store checkpoint directories to current directory
     storeDirsToRetain.forEach((storeName, storeDirToRetain) -> {
-        File currentDir = storageManagerUtil.getTaskStoreDir(
-            loggedStoreBaseDirectory, storeName, taskName, taskMode);
-        LOG.info("Moving logged store checkpoint directory: {} for store: {} in task: {} to current directory: {}",
-            storeDirsToRetain.toString(), storeName, taskName, currentDir);
-        storageManagerUtil.restoreCheckpointFiles(storeDirToRetain, currentDir);
-        // do not remove the checkpoint directory yet. in case commit fails and container restarts,
-        // we can retry the move. if we delete the checkpoint, the current dir will be deleted as well on
-        // restart, and we will have to do a full restore.
-      });
+      File currentDir = storageManagerUtil.getTaskStoreDir(
+          loggedStoreBaseDirectory, storeName, taskName, taskMode);
+      LOG.info("Moving logged store checkpoint directory: {} for store: {} in task: {} to current directory: {}",
+          storeDirToRetain, storeName, taskName, currentDir);
+      storageManagerUtil.restoreCheckpointFiles(storeDirToRetain, currentDir);
+      // do not remove the checkpoint directory yet. in case commit fails and container restarts,
+      // we can retry the move. if we delete the checkpoint, the current dir will be deleted as well on
+      // restart, and we will have to do a full restore.
+    });
 
     // create any missing (not retained) current directories for persistent stores
     storeEngines.forEach((storeName, storageEngine) -> {
-        if (storageEngine.getStoreProperties().isPersistedToDisk()) {
-          File currentDir;
-          if (storageEngine.getStoreProperties().isLoggedStore()) {
-            currentDir = storageManagerUtil.getTaskStoreDir(
-                loggedStoreBaseDirectory, storeName, taskName, taskMode);
-          } else {
-            currentDir = storageManagerUtil.getTaskStoreDir(
-                nonLoggedStoreBaseDirectory, storeName, taskName, taskMode);
-          }
-
-          try {
-            if (!fileUtil.exists(currentDir.toPath())) {
-              LOG.info("Creating missing persistent store current directory: {} for store: {} in task: {}",
-                  currentDir, storeName, taskName);
-              fileUtil.createDirectories(currentDir.toPath());
-            }
-          } catch (Exception e) {
-            throw new SamzaException(String.format("Error setting up current directory for store: %s", storeName), e);
-          }
+      if (storageEngine.getStoreProperties().isPersistedToDisk()) {
+        File currentDir;
+        if (storageEngine.getStoreProperties().isLoggedStore()) {
+          currentDir = storageManagerUtil.getTaskStoreDir(
+              loggedStoreBaseDirectory, storeName, taskName, taskMode);
+        } else {
+          currentDir = storageManagerUtil.getTaskStoreDir(
+              nonLoggedStoreBaseDirectory, storeName, taskName, taskMode);
         }
-      });
+
+        try {
+          if (!fileUtil.exists(currentDir.toPath())) {
+            LOG.info("Creating missing persistent store current directory: {} for store: {} in task: {}",
+                currentDir, storeName, taskName);
+            fileUtil.createDirectories(currentDir.toPath());
+          }
+        } catch (Exception e) {
+          throw new SamzaException(String.format("Error setting up current directory for store: %s", storeName), e);
+        }
+      }
+    });
   }
 
   /**
@@ -509,39 +509,39 @@
     // hence we register upcoming offset as the dummy offset by default and override it later if necessary.
     // using upcoming offset ensures that no messages are replayed by default.
     storeChangelogs.forEach((storeName, changelog) -> {
-        SystemStreamPartition changelogSSP = new SystemStreamPartition(changelog, taskModel.getChangelogPartition());
-        SystemConsumer systemConsumer = storeConsumers.get(storeName);
-        SystemStreamPartitionMetadata currentOffsets = currentChangelogOffsets.get(changelogSSP);
-        String upcomingOffset = currentOffsets.getUpcomingOffset();
-        LOG.info("Temporarily registering upcoming offset: {} as the starting offest for changelog ssp: {}. " +
-            "This might be overridden later for stores that need restoring.", upcomingOffset, changelogSSP);
-        systemConsumer.register(changelogSSP, upcomingOffset);
-      });
+      SystemStreamPartition changelogSSP = new SystemStreamPartition(changelog, taskModel.getChangelogPartition());
+      SystemConsumer systemConsumer = storeConsumers.get(storeName);
+      SystemStreamPartitionMetadata currentOffsets = currentChangelogOffsets.get(changelogSSP);
+      String upcomingOffset = currentOffsets.getUpcomingOffset();
+      LOG.info("Temporarily registering upcoming offset: {} as the starting offest for changelog ssp: {}. " +
+          "This might be overridden later for stores that need restoring.", upcomingOffset, changelogSSP);
+      systemConsumer.register(changelogSSP, upcomingOffset);
+    });
 
     // now register the actual starting offset if necessary. system consumer will ensure that the lower of the
     // two registered offsets is used as the starting offset.
     storesToRestore.forEach((storeName, restoreOffsets) -> {
-        SystemStream changelog = storeChangelogs.get(storeName);
-        SystemStreamPartition changelogSSP = new SystemStreamPartition(changelog, taskModel.getChangelogPartition());
-        SystemAdmin systemAdmin = systemAdmins.getSystemAdmin(changelog.getSystem());
-        validateRestoreOffsets(restoreOffsets, systemAdmin);
+      SystemStream changelog = storeChangelogs.get(storeName);
+      SystemStreamPartition changelogSSP = new SystemStreamPartition(changelog, taskModel.getChangelogPartition());
+      SystemAdmin systemAdmin = systemAdmins.getSystemAdmin(changelog.getSystem());
+      validateRestoreOffsets(restoreOffsets, systemAdmin);
 
-        SystemConsumer systemConsumer = storeConsumers.get(storeName);
-        SystemStreamPartitionMetadata currentOffsets = currentChangelogOffsets.get(changelogSSP);
-        String oldestOffset = currentOffsets.getOldestOffset();
+      SystemConsumer systemConsumer = storeConsumers.get(storeName);
+      SystemStreamPartitionMetadata currentOffsets = currentChangelogOffsets.get(changelogSSP);
+      String oldestOffset = currentOffsets.getOldestOffset();
 
-        // if the starting offset equals oldest offset (e.g. for full restore), start from the oldest offset (inclusive).
-        // else, start from the next (upcoming) offset.
-        String startingOffset;
-        if (systemAdmin.offsetComparator(restoreOffsets.startingOffset, oldestOffset) == 0) {
-          startingOffset = oldestOffset;
-        } else {
-          Map<SystemStreamPartition, String> offsetMap = ImmutableMap.of(changelogSSP, restoreOffsets.startingOffset);
-          startingOffset = systemAdmin.getOffsetsAfter(offsetMap).get(changelogSSP);
-        }
-        LOG.info("Registering starting offset: {} for changelog ssp: {}", startingOffset, changelogSSP);
-        systemConsumer.register(changelogSSP, startingOffset);
-      });
+      // if the starting offset equals oldest offset (e.g. for full restore), start from the oldest offset (inclusive).
+      // else, start from the next (upcoming) offset.
+      String startingOffset;
+      if (systemAdmin.offsetComparator(restoreOffsets.startingOffset, oldestOffset) == 0) {
+        startingOffset = oldestOffset;
+      } else {
+        Map<SystemStreamPartition, String> offsetMap = ImmutableMap.of(changelogSSP, restoreOffsets.startingOffset);
+        startingOffset = systemAdmin.getOffsetsAfter(offsetMap).get(changelogSSP);
+      }
+      LOG.info("Registering starting offset: {} for changelog ssp: {}", startingOffset, changelogSSP);
+      systemConsumer.register(changelogSSP, startingOffset);
+    });
   }
 
   private static void validateRestoreOffsets(RestoreOffsets restoreOffsets, SystemAdmin systemAdmin) {
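
To make the restore-planning decisions above concrete, the sketch below walks through the two guards applied to a persistent logged store: whether the checkpointed offset still lies within [oldest, newest], and whether the time since the last checkpoint exceeds 90% of the changelog's min.compaction.lag.ms (in which case the restore target falls back to the newest offset). The offset, lag, and downtime values are hypothetical, and numeric string offsets stand in for SystemAdmin#offsetComparator.

import java.time.Duration;

// Standalone sketch of the guards applied to a persistent logged store during restore planning.
// Offsets are assumed to be numeric strings; the real code uses SystemAdmin#offsetComparator.
public class RestorePlanningSketch {
  static int compare(String a, String b) {
    return Long.compare(Long.parseLong(a), Long.parseLong(b));
  }

  public static void main(String[] args) {
    String oldest = "100", newest = "250", checkpointed = "240";

    // Guard 1: a checkpointed offset outside [oldest, newest] forces a full restore from the changelog.
    boolean outOfRange = compare(oldest, checkpointed) > 0 || compare(checkpointed, newest) > 0;

    // Guard 2: if the checkpoint is older than ~90% of min.compaction.lag.ms, the changelog may already
    // be compacted past it, so the restore target falls back to the newest offset instead of the checkpoint.
    long minCompactionLagMs = Duration.ofHours(24).toMillis();        // hypothetical topic setting
    long timeSinceLastCheckpointMs = Duration.ofHours(23).toMillis(); // hypothetical container downtime
    String targetOffset = timeSinceLastCheckpointMs > 0.9 * minCompactionLagMs ? newest : checkpointed;

    System.out.println("out of range: " + outOfRange + ", restore target: " + targetOffset);
  }
}
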
diff --git a/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemoryManager.java b/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemoryManager.java
index f3028f9..13ebf6e 100644
--- a/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemoryManager.java
+++ b/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemoryManager.java
@@ -111,9 +111,7 @@
   Map<SystemStreamPartition, List<IncomingMessageEnvelope>> poll(Map<SystemStreamPartition, String> sspsToOffsets) {
     return sspsToOffsets.entrySet()
         .stream()
-        .collect(Collectors.toMap(
-            Map.Entry::getKey,
-            entry -> poll(entry.getKey(), entry.getValue())));
+        .collect(Collectors.toMap(Map.Entry::getKey, entry -> poll(entry.getKey(), entry.getValue())));
   }
 
   /**
@@ -155,9 +153,8 @@
 
     return result.entrySet()
         .stream()
-        .collect(Collectors.toMap(
-            Map.Entry::getKey,
-            entry -> constructSystemStreamMetadata(entry.getKey(), entry.getValue())));
+        .collect(Collectors.toMap(Map.Entry::getKey,
+          entry -> constructSystemStreamMetadata(entry.getKey(), entry.getValue())));
   }
 
   /**
@@ -180,14 +177,39 @@
             .entrySet()
             .stream()
             .collect(Collectors.toMap(entry -> entry.getKey().getPartition(), entry -> {
-                List<IncomingMessageEnvelope> messages = entry.getValue();
-                String oldestOffset = messages.isEmpty() ? null : "0";
-                String newestOffset = messages.isEmpty() ? null : String.valueOf(messages.size() - 1);
-                String upcomingOffset = String.valueOf(messages.size());
+              List<IncomingMessageEnvelope> messages = entry.getValue();
+              Integer oldestOffset;
+              Integer newestOffset;
+              int upcomingOffset;
 
-                return new SystemStreamMetadata.SystemStreamPartitionMetadata(oldestOffset, newestOffset, upcomingOffset);
+              if (messages.isEmpty()) {
+                oldestOffset = null;
+                newestOffset = null;
+                upcomingOffset = 0;
+              } else if (messages.get(messages.size() - 1).isEndOfStream()) {
+                if (messages.size() > 1) {
+                  // don't count end of stream in offset indices
+                  oldestOffset = 0;
+                  newestOffset = messages.size() - 2;
+                  upcomingOffset = messages.size() - 1;
+                } else {
+                  // end of stream is the only message, treat the same as empty
+                  oldestOffset = null;
+                  newestOffset = null;
+                  upcomingOffset = 0;
+                }
+              } else {
+                // offsets correspond strictly to numeric indices
+                oldestOffset = 0;
+                newestOffset = messages.size() - 1;
+                upcomingOffset = messages.size();
+              }
 
-              }));
+              return new SystemStreamMetadata.SystemStreamPartitionMetadata(
+                  oldestOffset == null ? null : oldestOffset.toString(),
+                  newestOffset == null ? null : newestOffset.toString(),
+                  Integer.toString(upcomingOffset));
+            }));
 
     return new SystemStreamMetadata(streamName, partitionMetadata);
   }
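
A standalone illustration of the in-memory offset bookkeeping introduced above: offsets are list indices, a trailing end-of-stream marker is excluded from the oldest/newest offsets, and an empty (or end-of-stream-only) partition reports null oldest/newest with an upcoming offset of 0. The helper below mirrors that rule on plain lists; it is a sketch, not the Samza class.

import java.util.Arrays;
import java.util.List;

// Sketch of the in-memory offset rules: offsets are list indices, a trailing end-of-stream
// marker is not counted, and empty partitions report null oldest/newest offsets.
public class InMemoryOffsetSketch {
  static String describe(List<String> messages, boolean endsWithEndOfStream) {
    Integer oldest, newest;
    int upcoming;
    if (messages.isEmpty() || (endsWithEndOfStream && messages.size() == 1)) {
      oldest = null;                  // nothing (or only end-of-stream) in the partition
      newest = null;
      upcoming = 0;
    } else if (endsWithEndOfStream) {
      oldest = 0;
      newest = messages.size() - 2;   // skip the end-of-stream marker
      upcoming = messages.size() - 1;
    } else {
      oldest = 0;
      newest = messages.size() - 1;   // offsets correspond strictly to numeric indices
      upcoming = messages.size();
    }
    return "oldest=" + oldest + ", newest=" + newest + ", upcoming=" + upcoming;
  }

  public static void main(String[] args) {
    System.out.println(describe(Arrays.asList(), false));                  // empty partition
    System.out.println(describe(Arrays.asList("m0", "m1", "m2"), false));  // three messages
    System.out.println(describe(Arrays.asList("m0", "m1", "EOS"), true));  // end-of-stream at the tail
  }
}
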
diff --git a/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemorySystemAdmin.java b/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemorySystemAdmin.java
index cb5478c..38ce2af 100644
--- a/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemorySystemAdmin.java
+++ b/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemorySystemAdmin.java
@@ -61,9 +61,9 @@
     return offsets.entrySet()
         .stream()
         .collect(Collectors.toMap(Map.Entry::getKey, entry -> {
-            String offset = entry.getValue();
-            return String.valueOf(Integer.valueOf(offset) + 1);
-          }));
+          String offset = entry.getValue();
+          return String.valueOf(Integer.valueOf(offset) + 1);
+        }));
   }
 
   /**
diff --git a/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemorySystemConsumer.java b/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemorySystemConsumer.java
index 2c02b79..ae3d3c9 100644
--- a/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemorySystemConsumer.java
+++ b/samza-core/src/main/java/org/apache/samza/system/inmemory/InMemorySystemConsumer.java
@@ -148,9 +148,9 @@
 
     for (Map.Entry<SystemStreamPartition, List<IncomingMessageEnvelope>> sspToMessage : result.entrySet()) {
       sspToOffset.computeIfPresent(sspToMessage.getKey(), (ssp, offset) -> {
-          int newOffset = Integer.parseInt(offset) + sspToMessage.getValue().size();
-          return String.valueOf(newOffset);
-        });
+        int newOffset = Integer.parseInt(offset) + sspToMessage.getValue().size();
+        return String.valueOf(newOffset);
+      });
       // absent should never be the case
     }
 
diff --git a/samza-core/src/main/java/org/apache/samza/table/TableConfigGenerator.java b/samza-core/src/main/java/org/apache/samza/table/TableConfigGenerator.java
index 9e2f279..8b647ed 100644
--- a/samza-core/src/main/java/org/apache/samza/table/TableConfigGenerator.java
+++ b/samza-core/src/main/java/org/apache/samza/table/TableConfigGenerator.java
@@ -76,10 +76,10 @@
     tableDescriptors.stream()
         .filter(d -> d instanceof LocalTableDescriptor)
         .forEach(d -> {
-            LocalTableDescriptor ld = (LocalTableDescriptor) d;
-            tableKeySerdes.put(ld.getTableId(), ld.getSerde().getKeySerde());
-            tableValueSerdes.put(ld.getTableId(), ld.getSerde().getValueSerde());
-          });
+          LocalTableDescriptor ld = (LocalTableDescriptor) d;
+          tableKeySerdes.put(ld.getTableId(), ld.getSerde().getKeySerde());
+          tableValueSerdes.put(ld.getTableId(), ld.getSerde().getValueSerde());
+        });
     serdes.addAll(tableKeySerdes.values());
     serdes.addAll(tableValueSerdes.values());
 
@@ -88,21 +88,21 @@
     Base64.Encoder base64Encoder = Base64.getEncoder();
     Map<Serde, String> serdeUUIDs = new HashMap<>();
     serdes.forEach(serde -> {
-        String serdeName = serdeUUIDs.computeIfAbsent(serde,
-            s -> serde.getClass().getSimpleName() + "-" + UUID.randomUUID().toString());
-        serdeConfigs.putIfAbsent(String.format(SerializerConfig.SERDE_SERIALIZED_INSTANCE, serdeName),
-            base64Encoder.encodeToString(serializableSerde.toBytes(serde)));
-      });
+      String serdeName = serdeUUIDs.computeIfAbsent(serde,
+        s -> serde.getClass().getSimpleName() + "-" + UUID.randomUUID().toString());
+      serdeConfigs.putIfAbsent(String.format(SerializerConfig.SERDE_SERIALIZED_INSTANCE, serdeName),
+          base64Encoder.encodeToString(serializableSerde.toBytes(serde)));
+    });
 
     // Set key and msg serdes for tables to the serde names generated above
     tableKeySerdes.forEach((tableId, serde) -> {
-        String keySerdeConfigKey = String.format(JavaTableConfig.STORE_KEY_SERDE, tableId);
-        serdeConfigs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
-      });
+      String keySerdeConfigKey = String.format(JavaTableConfig.STORE_KEY_SERDE, tableId);
+      serdeConfigs.put(keySerdeConfigKey, serdeUUIDs.get(serde));
+    });
     tableValueSerdes.forEach((tableId, serde) -> {
-        String valueSerdeConfigKey = String.format(JavaTableConfig.STORE_MSG_SERDE, tableId);
-        serdeConfigs.put(valueSerdeConfigKey, serdeUUIDs.get(serde));
-      });
+      String valueSerdeConfigKey = String.format(JavaTableConfig.STORE_MSG_SERDE, tableId);
+      serdeConfigs.put(valueSerdeConfigKey, serdeUUIDs.get(serde));
+    });
     return serdeConfigs;
   }
 }
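
The serde wiring above reduces to a dedup-then-reference pattern: each distinct serde instance is registered once under a generated name, and every table's key/value serde config then points at that name. A minimal standalone sketch of the pattern follows; the config key strings are placeholders rather than the actual SerializerConfig/JavaTableConfig constants, and plain strings stand in for serde instances.

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.UUID;

// Sketch of the dedup-then-reference pattern used for table serde config generation.
// Config key formats below are placeholders, not the real Samza constants.
public class SerdeConfigSketch {
  public static void main(String[] args) {
    Map<String, String> tableToKeySerde = new LinkedHashMap<>();
    tableToKeySerde.put("table-1", "StringSerde");
    tableToKeySerde.put("table-2", "StringSerde");       // same serde reused across tables

    Map<String, String> serdeNames = new HashMap<>();    // serde -> generated unique name
    Map<String, String> configs = new LinkedHashMap<>();

    tableToKeySerde.forEach((tableId, serde) -> {
      String serdeName = serdeNames.computeIfAbsent(serde,
          s -> s + "-" + UUID.randomUUID());                              // register each distinct serde once
      configs.putIfAbsent("serializers.registry." + serdeName + ".serialized.instance", "<base64 bytes>");
      configs.put("tables." + tableId + ".key.serde", serdeName);         // tables reference the shared name
    });

    configs.forEach((k, v) -> System.out.println(k + " = " + v));
  }
}
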
diff --git a/samza-core/src/main/java/org/apache/samza/table/TableManager.java b/samza-core/src/main/java/org/apache/samza/table/TableManager.java
index d3611f8..86899de 100644
--- a/samza-core/src/main/java/org/apache/samza/table/TableManager.java
+++ b/samza-core/src/main/java/org/apache/samza/table/TableManager.java
@@ -70,9 +70,9 @@
    */
   public TableManager(Config config) {
     new JavaTableConfig(config).getTableIds().forEach(tableId -> {
-        addTable(tableId, config);
-        logger.debug("Added table " + tableId);
-      });
+      addTable(tableId, config);
+      logger.debug("Added table " + tableId);
+    });
     logger.info(String.format("Added %d tables", tableContexts.size()));
   }
 
diff --git a/samza-core/src/main/java/org/apache/samza/table/batching/BatchProcessor.java b/samza-core/src/main/java/org/apache/samza/table/batching/BatchProcessor.java
index e061885..025a565 100644
--- a/samza-core/src/main/java/org/apache/samza/table/batching/BatchProcessor.java
+++ b/samza-core/src/main/java/org/apache/samza/table/batching/BatchProcessor.java
@@ -148,13 +148,13 @@
     final long maxDelay = batch.getMaxBatchDelay().toMillis();
     if (maxDelay != Integer.MAX_VALUE) {
       scheduledFuture = scheduledExecutorService.schedule(() -> {
-          lock.lock();
-          try {
-            processBatch(false);
-          } finally {
-            lock.unlock();
-          }
-        }, maxDelay, TimeUnit.MILLISECONDS);
+        lock.lock();
+        try {
+          processBatch(false);
+        } finally {
+          lock.unlock();
+        }
+      }, maxDelay, TimeUnit.MILLISECONDS);
     }
   }
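
The scheduling change above arms a one-shot timer that flushes the batch once its maximum delay elapses, taking the same lock that guards synchronous batch updates. A minimal standalone equivalent using java.util.concurrent primitives is sketched below; the flush body is a hypothetical stand-in for processBatch(false).

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;

// Minimal sketch: flush a pending batch after a max delay, under the same lock that guards updates.
public class MaxDelayFlushSketch {
  private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
  private final ReentrantLock lock = new ReentrantLock();
  private ScheduledFuture<?> scheduledFuture;

  void scheduleFlush(long maxDelayMs) {
    scheduledFuture = scheduler.schedule(() -> {
      lock.lock();
      try {
        System.out.println("flushing batch after " + maxDelayMs + " ms");  // stand-in for processBatch(false)
      } finally {
        lock.unlock();
      }
    }, maxDelayMs, TimeUnit.MILLISECONDS);
  }

  public static void main(String[] args) throws InterruptedException {
    MaxDelayFlushSketch sketch = new MaxDelayFlushSketch();
    sketch.scheduleFlush(100);
    Thread.sleep(200);              // let the timer fire before shutting down
    sketch.scheduler.shutdown();
  }
}
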
 
diff --git a/samza-core/src/main/java/org/apache/samza/table/batching/TableBatchHandler.java b/samza-core/src/main/java/org/apache/samza/table/batching/TableBatchHandler.java
index 12ec6f6..67d8dd8 100644
--- a/samza-core/src/main/java/org/apache/samza/table/batching/TableBatchHandler.java
+++ b/samza-core/src/main/java/org/apache/samza/table/batching/TableBatchHandler.java
@@ -64,15 +64,15 @@
         table.getAllAsync(gets) : table.getAllAsync(gets, args);
 
     getsFuture.whenComplete((map, throwable) -> {
-        operations.forEach(operation -> {
-            GetOperation<K, V> getOperation = (GetOperation<K, V>) operation;
-            if (throwable != null) {
-              getOperation.completeExceptionally(throwable);
-            } else {
-              getOperation.complete(map.get(operation.getKey()));
-            }
-          });
+      operations.forEach(operation -> {
+        GetOperation<K, V> getOperation = (GetOperation<K, V>) operation;
+        if (throwable != null) {
+          getOperation.completeExceptionally(throwable);
+        } else {
+          getOperation.complete(map.get(operation.getKey()));
+        }
       });
+    });
     return getsFuture;
   }
 
@@ -151,11 +151,11 @@
         handleBatchDelete(getDeleteOperations(batch)),
         handleBatchGet(getQueryOperations(batch)))
         .whenComplete((val, throwable) -> {
-            if (throwable != null) {
-              batch.completeExceptionally(throwable);
-            } else {
-              batch.complete();
-            }
-          });
+          if (throwable != null) {
+            batch.completeExceptionally(throwable);
+          } else {
+            batch.complete();
+          }
+        });
   }
 }
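
The whenComplete callbacks above fan one batched result (or failure) back out to the per-operation futures. Below is a self-contained sketch of that fan-out pattern with CompletableFuture; the key/value types and the batched lookup are hypothetical stand-ins for table.getAllAsync.

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

// Sketch: complete one future per key from a single batched lookup, propagating failures to all of them.
public class BatchFanOutSketch {
  public static void main(String[] args) {
    List<String> keys = Arrays.asList("k1", "k2", "k3");
    Map<String, CompletableFuture<String>> perKey = keys.stream()
        .collect(Collectors.toMap(k -> k, k -> new CompletableFuture<>()));

    // Stand-in for table.getAllAsync(keys): a batched future that resolves to a key -> value map.
    CompletableFuture<Map<String, String>> batched = CompletableFuture.supplyAsync(() ->
        keys.stream().collect(Collectors.toMap(k -> k, k -> k + "-value")));

    batched.whenComplete((map, throwable) ->
        perKey.forEach((key, future) -> {
          if (throwable != null) {
            future.completeExceptionally(throwable);   // one failure fails every pending get
          } else {
            future.complete(map.get(key));             // may be null if the key was missing
          }
        }));

    perKey.forEach((key, future) -> System.out.println(key + " -> " + future.join()));
  }
}
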
diff --git a/samza-core/src/main/java/org/apache/samza/table/caching/CachingTable.java b/samza-core/src/main/java/org/apache/samza/table/caching/CachingTable.java
index dee0767..cb86cac 100644
--- a/samza-core/src/main/java/org/apache/samza/table/caching/CachingTable.java
+++ b/samza-core/src/main/java/org/apache/samza/table/caching/CachingTable.java
@@ -99,10 +99,10 @@
     List<K> missKeys = new ArrayList<>();
     records.putAll(cache.getAll(keys, args));
     keys.forEach(k -> {
-        if (!records.containsKey(k)) {
-          missKeys.add(k);
-        }
-      });
+      if (!records.containsKey(k)) {
+        missKeys.add(k);
+      }
+    });
     return missKeys;
   }
 
@@ -128,16 +128,16 @@
     missCount.incrementAndGet();
 
     return table.getAsync(key, args).handle((result, e) -> {
-        if (e != null) {
-          throw new SamzaException("Failed to get the record for " + key, e);
-        } else {
-          if (result != null) {
-            cache.put(key, result, args);
-          }
-          updateTimer(metrics.getNs, clock.nanoTime() - startNs);
-          return result;
+      if (e != null) {
+        throw new SamzaException("Failed to get the record for " + key, e);
+      } else {
+        if (result != null) {
+          cache.put(key, result, args);
         }
-      });
+        updateTimer(metrics.getNs, clock.nanoTime() - startNs);
+        return result;
+      }
+    });
   }
 
   @Override
@@ -162,19 +162,19 @@
 
     long startNs = clock.nanoTime();
     return table.getAllAsync(missingKeys, args).handle((records, e) -> {
-        if (e != null) {
-          throw new SamzaException("Failed to get records for " + keys, e);
-        } else {
-          if (records != null) {
-            cache.putAll(records.entrySet().stream()
-                .map(r -> new Entry<>(r.getKey(), r.getValue()))
-                .collect(Collectors.toList()), args);
-            getAllResult.putAll(records);
-          }
-          updateTimer(metrics.getAllNs, clock.nanoTime() - startNs);
-          return getAllResult;
+      if (e != null) {
+        throw new SamzaException("Failed to get records for " + keys, e);
+      } else {
+        if (records != null) {
+          cache.putAll(records.entrySet().stream()
+              .map(r -> new Entry<>(r.getKey(), r.getValue()))
+              .collect(Collectors.toList()), args);
+          getAllResult.putAll(records);
         }
-      });
+        updateTimer(metrics.getAllNs, clock.nanoTime() - startNs);
+        return getAllResult;
+      }
+    });
   }
 
   @Override
@@ -193,18 +193,18 @@
 
     long startNs = clock.nanoTime();
     return table.putAsync(key, value, args).handle((result, e) -> {
-        if (e != null) {
-          throw new SamzaException(String.format("Failed to put a record, key=%s, value=%s", key, value), e);
-        } else if (!isWriteAround) {
-          if (value == null) {
-            cache.delete(key, args);
-          } else {
-            cache.put(key, value, args);
-          }
+      if (e != null) {
+        throw new SamzaException(String.format("Failed to put a record, key=%s, value=%s", key, value), e);
+      } else if (!isWriteAround) {
+        if (value == null) {
+          cache.delete(key, args);
+        } else {
+          cache.put(key, value, args);
         }
-        updateTimer(metrics.putNs, clock.nanoTime() - startNs);
-        return result;
-      });
+      }
+      updateTimer(metrics.putNs, clock.nanoTime() - startNs);
+      return result;
+    });
   }
 
   @Override
@@ -222,15 +222,15 @@
     long startNs = clock.nanoTime();
     Preconditions.checkNotNull(table, "Cannot write to a read-only table: " + table);
     return table.putAllAsync(records, args).handle((result, e) -> {
-        if (e != null) {
-          throw new SamzaException("Failed to put records " + records, e);
-        } else if (!isWriteAround) {
-          cache.putAll(records, args);
-        }
+      if (e != null) {
+        throw new SamzaException("Failed to put records " + records, e);
+      } else if (!isWriteAround) {
+        cache.putAll(records, args);
+      }
 
-        updateTimer(metrics.putAllNs, clock.nanoTime() - startNs);
-        return result;
-      });
+      updateTimer(metrics.putAllNs, clock.nanoTime() - startNs);
+      return result;
+    });
   }
 
   @Override
@@ -248,14 +248,14 @@
     long startNs = clock.nanoTime();
     Preconditions.checkNotNull(table, "Cannot delete from a read-only table: " + table);
     return table.deleteAsync(key, args).handle((result, e) -> {
-        if (e != null) {
-          throw new SamzaException("Failed to delete the record for " + key, e);
-        } else if (!isWriteAround) {
-          cache.delete(key, args);
-        }
-        updateTimer(metrics.deleteNs, clock.nanoTime() - startNs);
-        return result;
-      });
+      if (e != null) {
+        throw new SamzaException("Failed to delete the record for " + key, e);
+      } else if (!isWriteAround) {
+        cache.delete(key, args);
+      }
+      updateTimer(metrics.deleteNs, clock.nanoTime() - startNs);
+      return result;
+    });
   }
 
   @Override
@@ -273,14 +273,14 @@
     long startNs = clock.nanoTime();
     Preconditions.checkNotNull(table, "Cannot delete from a read-only table: " + table);
     return table.deleteAllAsync(keys, args).handle((result, e) -> {
-        if (e != null) {
-          throw new SamzaException("Failed to delete the record for " + keys, e);
-        } else if (!isWriteAround) {
-          cache.deleteAll(keys, args);
-        }
-        updateTimer(metrics.deleteAllNs, clock.nanoTime() - startNs);
-        return result;
-      });
+      if (e != null) {
+        throw new SamzaException("Failed to delete the record for " + keys, e);
+      } else if (!isWriteAround) {
+        cache.deleteAll(keys, args);
+      }
+      updateTimer(metrics.deleteAllNs, clock.nanoTime() - startNs);
+      return result;
+    });
   }
 
   @Override
@@ -288,12 +288,12 @@
     incCounter(metrics.numReads);
     long startNs = clock.nanoTime();
     return table.readAsync(opId, args).handle((result, e) -> {
-        if (e != null) {
-          throw new SamzaException("Failed to read, opId=" + opId, e);
-        }
-        updateTimer(metrics.readNs, clock.nanoTime() - startNs);
-        return (T) result;
-      });
+      if (e != null) {
+        throw new SamzaException("Failed to read, opId=" + opId, e);
+      }
+      updateTimer(metrics.readNs, clock.nanoTime() - startNs);
+      return (T) result;
+    });
   }
 
   @Override
@@ -301,12 +301,12 @@
     incCounter(metrics.numWrites);
     long startNs = clock.nanoTime();
     return table.writeAsync(opId, args).handle((result, e) -> {
-        if (e != null) {
-          throw new SamzaException("Failed to write, opId=" + opId, e);
-        }
-        updateTimer(metrics.writeNs, clock.nanoTime() - startNs);
-        return (T) result;
-      });
+      if (e != null) {
+        throw new SamzaException("Failed to write, opId=" + opId, e);
+      }
+      updateTimer(metrics.writeNs, clock.nanoTime() - startNs);
+      return (T) result;
+    });
   }
 
   @Override
diff --git a/samza-core/src/main/java/org/apache/samza/table/caching/guava/GuavaCacheTable.java b/samza-core/src/main/java/org/apache/samza/table/caching/guava/GuavaCacheTable.java
index 02083f3..46c3c4e 100644
--- a/samza-core/src/main/java/org/apache/samza/table/caching/guava/GuavaCacheTable.java
+++ b/samza-core/src/main/java/org/apache/samza/table/caching/guava/GuavaCacheTable.java
@@ -140,12 +140,12 @@
       List<K> delKeys = new ArrayList<>();
       List<Entry<K, V>> putRecords = new ArrayList<>();
       entries.forEach(r -> {
-          if (r.getValue() != null) {
-            putRecords.add(r);
-          } else {
-            delKeys.add(r.getKey());
-          }
-        });
+        if (r.getValue() != null) {
+          putRecords.add(r);
+        } else {
+          delKeys.add(r.getKey());
+        }
+      });
 
       cache.invalidateAll(delKeys);
       putRecords.forEach(e -> put(e.getKey(), e.getValue()));
diff --git a/samza-core/src/main/java/org/apache/samza/table/ratelimit/AsyncRateLimitedTable.java b/samza-core/src/main/java/org/apache/samza/table/ratelimit/AsyncRateLimitedTable.java
index 75fed12..b652a15 100644
--- a/samza-core/src/main/java/org/apache/samza/table/ratelimit/AsyncRateLimitedTable.java
+++ b/samza-core/src/main/java/org/apache/samza/table/ratelimit/AsyncRateLimitedTable.java
@@ -66,57 +66,57 @@
   @Override
   public CompletableFuture<V> getAsync(K key, Object ... args) {
     return doRead(
-        () -> readRateLimiter.throttle(key, args),
-        () -> table.getAsync(key, args));
+      () -> readRateLimiter.throttle(key, args),
+      () -> table.getAsync(key, args));
   }
 
   @Override
   public CompletableFuture<Map<K, V>> getAllAsync(List<K> keys, Object ... args) {
     return doRead(
-        () -> readRateLimiter.throttle(keys, args),
-        () -> table.getAllAsync(keys, args));
+      () -> readRateLimiter.throttle(keys, args),
+      () -> table.getAllAsync(keys, args));
   }
 
   @Override
   public <T> CompletableFuture<T> readAsync(int opId, Object ... args) {
     return doRead(
-        () -> readRateLimiter.throttle(opId, args),
-        () -> table.readAsync(opId, args));
+      () -> readRateLimiter.throttle(opId, args),
+      () -> table.readAsync(opId, args));
   }
 
   @Override
   public CompletableFuture<Void> putAsync(K key, V value, Object ... args) {
     return doWrite(
-        () -> writeRateLimiter.throttle(key, value, args),
-        () -> table.putAsync(key, value, args));
+      () -> writeRateLimiter.throttle(key, value, args),
+      () -> table.putAsync(key, value, args));
   }
 
   @Override
   public CompletableFuture<Void> putAllAsync(List<Entry<K, V>> entries, Object ... args) {
     return doWrite(
-        () -> writeRateLimiter.throttleRecords(entries),
-        () -> table.putAllAsync(entries, args));
+      () -> writeRateLimiter.throttleRecords(entries),
+      () -> table.putAllAsync(entries, args));
   }
 
   @Override
   public CompletableFuture<Void> deleteAsync(K key, Object ... args) {
     return doWrite(
-        () -> writeRateLimiter.throttle(key, args),
-        () -> table.deleteAsync(key, args));
+      () -> writeRateLimiter.throttle(key, args),
+      () -> table.deleteAsync(key, args));
   }
 
   @Override
   public CompletableFuture<Void> deleteAllAsync(List<K> keys, Object ... args) {
     return doWrite(
-        () -> writeRateLimiter.throttle(keys, args),
-        () -> table.deleteAllAsync(keys, args));
+      () -> writeRateLimiter.throttle(keys, args),
+      () -> table.deleteAllAsync(keys, args));
   }
 
   @Override
   public <T> CompletableFuture<T> writeAsync(int opId, Object ... args) {
     return doWrite(
-        () -> writeRateLimiter.throttle(opId, args),
-        () -> table.writeAsync(opId, args));
+      () -> writeRateLimiter.throttle(opId, args),
+      () -> table.writeAsync(opId, args));
   }
 
   @Override
diff --git a/samza-core/src/main/java/org/apache/samza/table/remote/RemoteTable.java b/samza-core/src/main/java/org/apache/samza/table/remote/RemoteTable.java
index 6d6c23a..eb54de2 100644
--- a/samza-core/src/main/java/org/apache/samza/table/remote/RemoteTable.java
+++ b/samza-core/src/main/java/org/apache/samza/table/remote/RemoteTable.java
@@ -173,14 +173,14 @@
     Preconditions.checkNotNull(key, "null key");
     return instrument(() -> asyncTable.getAsync(key, args), metrics.numGets, metrics.getNs)
         .handle((result, e) -> {
-            if (e != null) {
-              throw new SamzaException("Failed to get the records for " + key, e);
-            }
-            if (result == null) {
-              incCounter(metrics.numMissedLookups);
-            }
-            return result;
-          });
+          if (e != null) {
+            throw new SamzaException("Failed to get the records for " + key, e);
+          }
+          if (result == null) {
+            incCounter(metrics.numMissedLookups);
+          }
+          return result;
+        });
   }
 
   @Override
@@ -200,12 +200,12 @@
     }
     return instrument(() -> asyncTable.getAllAsync(keys, args), metrics.numGetAlls, metrics.getAllNs)
         .handle((result, e) -> {
-            if (e != null) {
-              throw new SamzaException("Failed to get the records for " + keys, e);
-            }
-            result.values().stream().filter(Objects::isNull).forEach(v -> incCounter(metrics.numMissedLookups));
-            return result;
-          });
+          if (e != null) {
+            throw new SamzaException("Failed to get the records for " + keys, e);
+          }
+          result.values().stream().filter(Objects::isNull).forEach(v -> incCounter(metrics.numMissedLookups));
+          return result;
+        });
   }
 
   @Override
@@ -221,8 +221,8 @@
   public <T> CompletableFuture<T> readAsync(int opId, Object ... args) {
     return (CompletableFuture<T>) instrument(() -> asyncTable.readAsync(opId, args), metrics.numReads, metrics.readNs)
         .exceptionally(e -> {
-            throw new SamzaException(String.format("Failed to read, opId=%d", opId), e);
-          });
+          throw new SamzaException(String.format("Failed to read, opId=%d", opId), e);
+        });
   }
 
   @Override
@@ -244,8 +244,8 @@
 
     return instrument(() -> asyncTable.putAsync(key, value, args), metrics.numPuts, metrics.putNs)
         .exceptionally(e -> {
-            throw new SamzaException("Failed to put a record with key=" + key, (Throwable) e);
-          });
+          throw new SamzaException("Failed to put a record with key=" + key, (Throwable) e);
+        });
   }
 
   @Override
@@ -281,9 +281,9 @@
         deleteFuture,
         instrument(() -> asyncTable.putAllAsync(putRecords, args), metrics.numPutAlls, metrics.putAllNs))
         .exceptionally(e -> {
-            String strKeys = records.stream().map(r -> r.getKey().toString()).collect(Collectors.joining(","));
-            throw new SamzaException(String.format("Failed to put records with keys=" + strKeys), e);
-          });
+          String strKeys = records.stream().map(r -> r.getKey().toString()).collect(Collectors.joining(","));
+          throw new SamzaException(String.format("Failed to put records with keys=" + strKeys), e);
+        });
   }
 
   @Override
@@ -301,8 +301,8 @@
     Preconditions.checkNotNull(key, "null key");
     return instrument(() -> asyncTable.deleteAsync(key, args), metrics.numDeletes, metrics.deleteNs)
         .exceptionally(e -> {
-            throw new SamzaException(String.format("Failed to delete the record for " + key), (Throwable) e);
-          });
+          throw new SamzaException(String.format("Failed to delete the record for " + key), (Throwable) e);
+        });
   }
 
   @Override
@@ -324,8 +324,8 @@
 
     return instrument(() -> asyncTable.deleteAllAsync(keys, args), metrics.numDeleteAlls, metrics.deleteAllNs)
         .exceptionally(e -> {
-            throw new SamzaException(String.format("Failed to delete records for " + keys), e);
-          });
+          throw new SamzaException(String.format("Failed to delete records for " + keys), e);
+        });
   }
 
   @Override
@@ -341,8 +341,8 @@
   public <T> CompletableFuture<T> writeAsync(int opId, Object... args) {
     return (CompletableFuture<T>) instrument(() -> asyncTable.writeAsync(opId, args), metrics.numWrites, metrics.writeNs)
         .exceptionally(e -> {
-            throw new SamzaException(String.format("Failed to write, opId=%d", opId), e);
-          });
+          throw new SamzaException(String.format("Failed to write, opId=%d", opId), e);
+        });
   }
 
   @Override
@@ -390,14 +390,14 @@
     CompletableFuture<T> ioFuture = func.apply();
     if (callbackExecutor != null) {
       ioFuture.thenApplyAsync(r -> {
-          updateTimer(timer, clock.nanoTime() - startNs);
-          return r;
-        }, callbackExecutor);
+        updateTimer(timer, clock.nanoTime() - startNs);
+        return r;
+      }, callbackExecutor);
     } else {
       ioFuture.thenApply(r -> {
-          updateTimer(timer, clock.nanoTime() - startNs);
-          return r;
-        });
+        updateTimer(timer, clock.nanoTime() - startNs);
+        return r;
+      });
     }
     return ioFuture;
   }
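The handle()/exceptionally() chains in RemoteTable follow one pattern: wrap the upstream failure in a context-rich exception, otherwise pass the result through. A minimal sketch of that pattern, with RuntimeException standing in for SamzaException:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.CompletionException;

    public class HandleRethrow {
      static CompletableFuture<String> lookup(String key, boolean fail) {
        return fail
            ? CompletableFuture.failedFuture(new IllegalStateException("backend down"))
            : CompletableFuture.completedFuture(key.toUpperCase());
      }

      static CompletableFuture<String> instrumented(String key, boolean fail) {
        return lookup(key, fail).handle((result, e) -> {
          if (e != null) {
            // Re-throwing inside handle() completes the returned future exceptionally,
            // wrapped in a CompletionException for downstream stages.
            throw new RuntimeException("Failed to get the record for " + key, e);
          }
          return result;
        });
      }

      public static void main(String[] args) {
        System.out.println(instrumented("k1", false).join());
        try {
          instrumented("k2", true).join();
        } catch (CompletionException e) {
          System.out.println("caught: " + e.getCause().getMessage());
        }
      }
    }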
diff --git a/samza-core/src/main/java/org/apache/samza/table/remote/RemoteTableProvider.java b/samza-core/src/main/java/org/apache/samza/table/remote/RemoteTableProvider.java
index 8cd1c07..36078ef 100644
--- a/samza-core/src/main/java/org/apache/samza/table/remote/RemoteTableProvider.java
+++ b/samza-core/src/main/java/org/apache/samza/table/remote/RemoteTableProvider.java
@@ -100,33 +100,33 @@
     if (callbackPoolSize > 0) {
       callbackExecutors.computeIfAbsent(tableId, (arg) ->
           Executors.newFixedThreadPool(callbackPoolSize, (runnable) -> {
-              Thread thread = new Thread(runnable);
-              thread.setName("table-" + tableId + "-async-callback-pool");
-              thread.setDaemon(true);
-              return thread;
-            }));
+            Thread thread = new Thread(runnable);
+            thread.setName("table-" + tableId + "-async-callback-pool");
+            thread.setDaemon(true);
+            return thread;
+          }));
     }
 
     boolean isRateLimited = readRateLimiter != null || writeRateLimiter != null;
     if (isRateLimited) {
       rateLimitingExecutors.computeIfAbsent(tableId, (arg) ->
           Executors.newSingleThreadExecutor(runnable -> {
-              Thread thread = new Thread(runnable);
-              thread.setName("table-" + tableId + "-async-executor");
-              thread.setDaemon(true);
-              return thread;
-            }));
+            Thread thread = new Thread(runnable);
+            thread.setName("table-" + tableId + "-async-executor");
+            thread.setDaemon(true);
+            return thread;
+          }));
     }
 
     BatchProvider batchProvider = deserializeObject(tableConfig, RemoteTableDescriptor.BATCH_PROVIDER);
     if (batchProvider != null) {
       batchExecutors.computeIfAbsent(tableId, (arg) ->
           Executors.newSingleThreadScheduledExecutor(runnable -> {
-              Thread thread = new Thread(runnable);
-              thread.setName("table-" + tableId + "-batch-scheduled-executor");
-              thread.setDaemon(true);
-              return thread;
-            }));
+            Thread thread = new Thread(runnable);
+            thread.setName("table-" + tableId + "-batch-scheduled-executor");
+            thread.setDaemon(true);
+            return thread;
+          }));
     }
 
 
@@ -162,11 +162,11 @@
 
   private ScheduledExecutorService createRetryExecutor() {
     return Executors.newSingleThreadScheduledExecutor(runnable -> {
-        Thread thread = new Thread(runnable);
-        thread.setName("table-retry-executor");
-        thread.setDaemon(true);
-        return thread;
-      });
+      Thread thread = new Thread(runnable);
+      thread.setName("table-retry-executor");
+      thread.setDaemon(true);
+      return thread;
+    });
   }
 }
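The three executors above share one thread-factory shape: a named daemon thread per pool, so the executor never blocks JVM shutdown. A small standalone sketch of that factory (the pool name is illustrative):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ThreadFactory;

    public class NamedDaemonThreads {
      // Reusable equivalent of the inline lambdas above: named, daemon threads.
      static ThreadFactory named(String prefix) {
        return runnable -> {
          Thread thread = new Thread(runnable);
          thread.setName(prefix);
          thread.setDaemon(true);
          return thread;
        };
      }

      public static void main(String[] args) {
        ExecutorService callbackPool =
            Executors.newFixedThreadPool(2, named("table-myTable-async-callback-pool"));
        callbackPool.submit(() -> System.out.println(Thread.currentThread().getName()));
        callbackPool.shutdown();
      }
    }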
 
diff --git a/samza-core/src/main/java/org/apache/samza/table/retry/FailsafeAdapter.java b/samza-core/src/main/java/org/apache/samza/table/retry/FailsafeAdapter.java
index b2eccd8..650d03a 100644
--- a/samza-core/src/main/java/org/apache/samza/table/retry/FailsafeAdapter.java
+++ b/samza-core/src/main/java/org/apache/samza/table/retry/FailsafeAdapter.java
@@ -89,15 +89,15 @@
     return Failsafe.with(retryPolicy).with(retryExec)
         .onRetry(e -> metrics.retryCount.inc())
         .onRetriesExceeded(e -> {
-            metrics.retryTimer.update(System.currentTimeMillis() - startMs);
-            metrics.permFailureCount.inc();
-          })
+          metrics.retryTimer.update(System.currentTimeMillis() - startMs);
+          metrics.permFailureCount.inc();
+        })
         .onSuccess((e, ctx) -> {
-            if (ctx.getExecutions() > 1) {
-              metrics.retryTimer.update(System.currentTimeMillis() - startMs);
-            } else {
-              metrics.successCount.inc();
-            }
-          });
+          if (ctx.getExecutions() > 1) {
+            metrics.retryTimer.update(System.currentTimeMillis() - startMs);
+          } else {
+            metrics.successCount.inc();
+          }
+        });
   }
 }
diff --git a/samza-core/src/main/java/org/apache/samza/task/StreamOperatorTask.java b/samza-core/src/main/java/org/apache/samza/task/StreamOperatorTask.java
index a0ec7b8..5a474cd 100644
--- a/samza-core/src/main/java/org/apache/samza/task/StreamOperatorTask.java
+++ b/samza-core/src/main/java/org/apache/samza/task/StreamOperatorTask.java
@@ -140,12 +140,12 @@
           }
 
           processFuture.whenComplete((val, ex) -> {
-              if (ex != null) {
-                callback.failure(ex);
-              } else {
-                callback.complete();
-              }
-            });
+            if (ex != null) {
+              callback.failure(ex);
+            } else {
+              callback.complete();
+            }
+          });
         }
       } catch (Exception e) {
         LOG.error("Failed to process the incoming message due to ", e);
diff --git a/samza-core/src/main/java/org/apache/samza/util/DiagnosticsUtil.java b/samza-core/src/main/java/org/apache/samza/util/DiagnosticsUtil.java
index 7c83466..0b88fa0 100644
--- a/samza-core/src/main/java/org/apache/samza/util/DiagnosticsUtil.java
+++ b/samza-core/src/main/java/org/apache/samza/util/DiagnosticsUtil.java
@@ -36,6 +36,7 @@
 import org.apache.samza.diagnostics.DiagnosticsManager;
 import org.apache.samza.job.model.JobModel;
 import org.apache.samza.metrics.MetricsRegistryMap;
+import org.apache.samza.metrics.MetricsReporterFactory;
 import org.apache.samza.metrics.reporter.Metrics;
 import org.apache.samza.metrics.reporter.MetricsHeader;
 import org.apache.samza.metrics.reporter.MetricsSnapshot;
@@ -51,7 +52,6 @@
 import org.apache.samza.system.SystemStream;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import scala.Option;
 
 
 public class DiagnosticsUtil {
@@ -100,54 +100,59 @@
       String jobId, JobModel jobModel, String containerId, Optional<String> execEnvContainerId, Config config) {
 
     JobConfig jobConfig = new JobConfig(config);
+    MetricsConfig metricsConfig = new MetricsConfig(config);
     Optional<Pair<DiagnosticsManager, MetricsSnapshotReporter>> diagnosticsManagerReporterPair = Optional.empty();
 
     if (jobConfig.getDiagnosticsEnabled()) {
 
+      // Diagnostics MetricReporter init
+      String diagnosticsReporterName = MetricsConfig.METRICS_SNAPSHOT_REPORTER_NAME_FOR_DIAGNOSTICS;
+      String diagnosticsFactoryClassName = metricsConfig.getMetricsFactoryClass(diagnosticsReporterName)
+          .orElseThrow(() -> new SamzaException(
+              String.format("Diagnostics reporter %s missing .class config", diagnosticsReporterName)));
+      MetricsReporterFactory metricsReporterFactory =
+          ReflectionUtil.getObj(diagnosticsFactoryClassName, MetricsReporterFactory.class);
+      MetricsSnapshotReporter diagnosticsReporter =
+          (MetricsSnapshotReporter) metricsReporterFactory.getMetricsReporter(diagnosticsReporterName,
+              "samza-container-" + containerId, config);
+
+      // DiagnosticsManager init
       ClusterManagerConfig clusterManagerConfig = new ClusterManagerConfig(config);
       int containerMemoryMb = clusterManagerConfig.getContainerMemoryMb();
       int containerNumCores = clusterManagerConfig.getNumCores();
       long maxHeapSizeBytes = Runtime.getRuntime().maxMemory();
       int containerThreadPoolSize = jobConfig.getThreadPoolSize();
-
-      // Diagnostic stream, producer, and reporter related parameters
-      String diagnosticsReporterName = MetricsConfig.METRICS_SNAPSHOT_REPORTER_NAME_FOR_DIAGNOSTICS;
-      MetricsConfig metricsConfig = new MetricsConfig(config);
-      int publishInterval = metricsConfig.getMetricsSnapshotReporterInterval(diagnosticsReporterName);
       String taskClassVersion = Util.getTaskClassVersion(config);
       String samzaVersion = Util.getSamzaVersion();
       String hostName = Util.getLocalHost().getHostName();
-      Optional<String> diagnosticsReporterStreamName = metricsConfig.getMetricsSnapshotReporterStream(diagnosticsReporterName);
+      Optional<String> diagnosticsReporterStreamName =
+          metricsConfig.getMetricsSnapshotReporterStream(diagnosticsReporterName);
 
       if (!diagnosticsReporterStreamName.isPresent()) {
-        throw new ConfigException("Missing required config: " + String.format(MetricsConfig.METRICS_SNAPSHOT_REPORTER_STREAM, diagnosticsReporterName));
+        throw new ConfigException(
+            "Missing required config: " + String.format(MetricsConfig.METRICS_SNAPSHOT_REPORTER_STREAM,
+                diagnosticsReporterName));
       }
-
       SystemStream diagnosticsSystemStream = StreamUtil.getSystemStreamFromNames(diagnosticsReporterStreamName.get());
 
+      // Create a SystemProducer for DiagnosticsManager. This producer is used by the DiagnosticsManager
+      // to write to the same stream as the MetricsSnapshotReporter called `diagnosticsreporter`.
       Optional<String> diagnosticsSystemFactoryName =
           new SystemConfig(config).getSystemFactory(diagnosticsSystemStream.getSystem());
       if (!diagnosticsSystemFactoryName.isPresent()) {
         throw new SamzaException("Missing factory in config for system " + diagnosticsSystemStream.getSystem());
       }
-
-      // Create a systemProducer for giving to diagnostic-reporter and diagnosticsManager
       SystemFactory systemFactory = ReflectionUtil.getObj(diagnosticsSystemFactoryName.get(), SystemFactory.class);
       SystemProducer systemProducer =
           systemFactory.getProducer(diagnosticsSystemStream.getSystem(), config, new MetricsRegistryMap());
+
       DiagnosticsManager diagnosticsManager =
           new DiagnosticsManager(jobName, jobId, jobModel.getContainers(), containerMemoryMb, containerNumCores,
-              new StorageConfig(config).getNumPersistentStores(), maxHeapSizeBytes, containerThreadPoolSize, containerId, execEnvContainerId.orElse(""),
-              taskClassVersion, samzaVersion, hostName, diagnosticsSystemStream, systemProducer,
+              new StorageConfig(config).getNumPersistentStores(), maxHeapSizeBytes, containerThreadPoolSize,
+              containerId, execEnvContainerId.orElse(""), taskClassVersion, samzaVersion, hostName,
+              diagnosticsSystemStream, systemProducer,
               Duration.ofMillis(new TaskConfig(config).getShutdownMs()), jobConfig.getAutosizingEnabled());
 
-      Option<String> blacklist = ScalaJavaUtil.JavaOptionals$.MODULE$.toRichOptional(
-          metricsConfig.getMetricsSnapshotReporterBlacklist(diagnosticsReporterName)).toOption();
-      MetricsSnapshotReporter diagnosticsReporter =
-          new MetricsSnapshotReporter(systemProducer, diagnosticsSystemStream, publishInterval, jobName, jobId,
-              "samza-container-" + containerId, taskClassVersion, samzaVersion, hostName, new MetricsSnapshotSerdeV2(),
-              blacklist, ScalaJavaUtil.toScalaFunction(() -> System.currentTimeMillis()));
-
       diagnosticsManagerReporterPair = Optional.of(new ImmutablePair<>(diagnosticsManager, diagnosticsReporter));
     }
 
diff --git a/samza-core/src/main/java/org/apache/samza/util/EmbeddedTaggedRateLimiter.java b/samza-core/src/main/java/org/apache/samza/util/EmbeddedTaggedRateLimiter.java
index adb637e..60c86c1 100644
--- a/samza-core/src/main/java/org/apache/samza/util/EmbeddedTaggedRateLimiter.java
+++ b/samza-core/src/main/java/org/apache/samza/util/EmbeddedTaggedRateLimiter.java
@@ -75,15 +75,15 @@
     Stopwatch stopwatch = Stopwatch.createStarted();
     return tagToCreditsMap.entrySet().stream()
         .map(e -> {
-            String tag = e.getKey();
-            int requiredCredits = e.getValue();
-            long remainingTimeoutInNanos = Math.max(0L, timeoutInNanos - stopwatch.elapsed(NANOSECONDS));
-            com.google.common.util.concurrent.RateLimiter rateLimiter = tagToRateLimiterMap.get(tag);
-            int availableCredits = rateLimiter.tryAcquire(requiredCredits, remainingTimeoutInNanos, NANOSECONDS)
-                ? requiredCredits
-                : 0;
-            return new ImmutablePair<>(tag, availableCredits);
-          })
+          String tag = e.getKey();
+          int requiredCredits = e.getValue();
+          long remainingTimeoutInNanos = Math.max(0L, timeoutInNanos - stopwatch.elapsed(NANOSECONDS));
+          com.google.common.util.concurrent.RateLimiter rateLimiter = tagToRateLimiterMap.get(tag);
+          int availableCredits = rateLimiter.tryAcquire(requiredCredits, remainingTimeoutInNanos, NANOSECONDS)
+              ? requiredCredits
+              : 0;
+          return new ImmutablePair<>(tag, availableCredits);
+        })
         .collect(Collectors.toMap(ImmutablePair::getKey, ImmutablePair::getValue));
   }
 
@@ -110,22 +110,22 @@
   public void init(Context context) {
     this.tagToRateLimiterMap = Collections.unmodifiableMap(tagToTargetRateMap.entrySet().stream()
         .map(e -> {
-            String tag = e.getKey();
-            JobModel jobModel = ((TaskContextImpl) context.getTaskContext()).getJobModel();
-            int numTasks = jobModel.getContainers().values().stream()
-                .mapToInt(cm -> cm.getTasks().size())
-                .sum();
-            double effectiveRate = (double) e.getValue() / numTasks;
-            TaskName taskName = context.getTaskContext().getTaskModel().getTaskName();
-            LOGGER.info(String.format("Effective rate limit for task %s and tag %s is %f", taskName, tag,
-                effectiveRate));
-            if (effectiveRate < 1.0) {
-              LOGGER.warn(String.format("Effective limit rate (%f) is very low. "
-                              + "Total rate limit is %d while number of tasks is %d. Consider increasing the rate limit.",
-                        effectiveRate, e.getValue(), numTasks));
-            }
-            return new ImmutablePair<>(tag, com.google.common.util.concurrent.RateLimiter.create(effectiveRate));
-          })
+          String tag = e.getKey();
+          JobModel jobModel = ((TaskContextImpl) context.getTaskContext()).getJobModel();
+          int numTasks = jobModel.getContainers().values().stream()
+              .mapToInt(cm -> cm.getTasks().size())
+              .sum();
+          double effectiveRate = (double) e.getValue() / numTasks;
+          TaskName taskName = context.getTaskContext().getTaskModel().getTaskName();
+          LOGGER.info(String.format("Effective rate limit for task %s and tag %s is %f", taskName, tag,
+              effectiveRate));
+          if (effectiveRate < 1.0) {
+            LOGGER.warn(String.format("Effective limit rate (%f) is very low. "
+                            + "Total rate limit is %d while number of tasks is %d. Consider increasing the rate limit.",
+                      effectiveRate, e.getValue(), numTasks));
+          }
+          return new ImmutablePair<>(tag, com.google.common.util.concurrent.RateLimiter.create(effectiveRate));
+        })
         .collect(Collectors.toMap(ImmutablePair::getKey, ImmutablePair::getValue))
     );
     initialized = true;
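The effective per-task rate above is simply the job-wide target divided by the task count, e.g. a 100 credits/sec limit shared by 25 tasks leaves 4 credits/sec per task. A minimal sketch with Guava's RateLimiter (the numbers are illustrative):

    import com.google.common.util.concurrent.RateLimiter;
    import java.util.concurrent.TimeUnit;

    public class PerTaskRateLimit {
      public static void main(String[] args) {
        int totalRatePerSec = 100;   // job-wide target rate for one tag
        int numTasks = 25;           // tasks sharing that budget
        double effectiveRate = (double) totalRatePerSec / numTasks;  // 4.0 permits/sec per task

        RateLimiter limiter = RateLimiter.create(effectiveRate);
        // Try to acquire 4 credits, waiting at most 50 ms, mirroring tryAcquire(...) above.
        boolean granted = limiter.tryAcquire(4, 50, TimeUnit.MILLISECONDS);
        System.out.println("granted=" + granted + ", effectiveRate=" + effectiveRate);
      }
    }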
diff --git a/samza-core/src/main/java/org/apache/samza/util/ReflectionUtil.java b/samza-core/src/main/java/org/apache/samza/util/ReflectionUtil.java
index e15fe7f..1b94824 100644
--- a/samza-core/src/main/java/org/apache/samza/util/ReflectionUtil.java
+++ b/samza-core/src/main/java/org/apache/samza/util/ReflectionUtil.java
@@ -97,10 +97,10 @@
         Class<?>[] argClasses = new Class<?>[args.length];
         Object[] argValues = new Object[args.length];
         IntStream.range(0, args.length).forEach(i -> {
-            ConstructorArgument<?> constructorArgument = args[i];
-            argClasses[i] = constructorArgument.getClazz();
-            argValues[i] = constructorArgument.getValue();
-          });
+          ConstructorArgument<?> constructorArgument = args[i];
+          argClasses[i] = constructorArgument.getClazz();
+          argValues[i] = constructorArgument.getValue();
+        });
         Constructor<T> constructor = classObj.getDeclaredConstructor(argClasses);
         return constructor.newInstance(argValues);
       }
diff --git a/samza-core/src/main/java/org/apache/samza/util/SplitDeploymentUtil.java b/samza-core/src/main/java/org/apache/samza/util/SplitDeploymentUtil.java
new file mode 100644
index 0000000..200cd3c
--- /dev/null
+++ b/samza-core/src/main/java/org/apache/samza/util/SplitDeploymentUtil.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.samza.util;
+
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import org.apache.samza.SamzaException;
+import org.apache.samza.config.ShellCommandConfig;
+
+
+public final class SplitDeploymentUtil {
+
+  /**
+   * The split deployment feature is enabled through the system env variable
+   * {@code ShellCommandConfig.ENV_SPLIT_DEPLOYMENT_ENABLED}.
+   * This method detects whether the split deployment feature is enabled.
+   *
+   * @return true if split deployment is enabled; false otherwise
+   */
+  public static boolean isSplitDeploymentEnabled() {
+    return Boolean.parseBoolean(System.getenv(ShellCommandConfig.ENV_SPLIT_DEPLOYMENT_ENABLED));
+  }
+
+  /**
+   * Execute the runner class using a separate isolated classloader.
+   * @param classLoader {@link ClassLoader} to use to load the runner class
+   * @param originalRunnerClass {@link Class} whose run method will be executed with the new class loader
+   * @param runMethodName name of the run method on the runner class
+   * @param runMethodArgs arguments to pass to the run method
+   */
+  public static void runWithClassLoader(ClassLoader classLoader, Class<?> originalRunnerClass, String runMethodName,
+      String[] runMethodArgs) {
+    // need to use the isolated classloader to load run method and then execute using that new class
+    Class<?> runnerClass;
+    try {
+      runnerClass = classLoader.loadClass(originalRunnerClass.getName());
+    } catch (ClassNotFoundException e) {
+      throw new SamzaException(String.format(
+          "Isolation was enabled, but unable to find %s in isolated classloader", originalRunnerClass.getName()), e);
+    }
+
+    // save the current context classloader so it can be reset after finishing the call to run method
+    ClassLoader previousContextClassLoader = Thread.currentThread().getContextClassLoader();
+    // this is needed because certain libraries (e.g. log4j) use the context classloader
+    Thread.currentThread().setContextClassLoader(classLoader);
+
+    try {
+      executeRunForRunnerClass(runnerClass, runMethodName, runMethodArgs);
+    } finally {
+      // reset the context class loader; it's good practice, and could be important when running a test suite
+      Thread.currentThread().setContextClassLoader(previousContextClassLoader);
+    }
+  }
+
+  private static void executeRunForRunnerClass(Class<?> runnerClass, String runMethodName, String[] runMethodArgs) {
+    Method runMethod;
+    try {
+      runMethod = runnerClass.getDeclaredMethod(runMethodName, String[].class);
+    } catch (NoSuchMethodException e) {
+      throw new SamzaException(String.format("Isolation was enabled, but unable to find %s method", runMethodName), e);
+    }
+    // only sets accessible flag for this method instance
+    runMethod.setAccessible(true);
+
+    try {
+      // wrapping args in object array so that args is passed as a single argument to the method
+      runMethod.invoke(null, new Object[]{runMethodArgs});
+    } catch (IllegalAccessException | InvocationTargetException e) {
+      throw new SamzaException(String.format("Exception while executing %s method", runMethodName), e);
+    }
+  }
+}
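A possible usage sketch for the new utility follows; the runner class, loader setup, and method name are illustrative placeholders, and a real split deployment would point the classloader at the framework jars instead of an empty URL array:

    import java.net.URL;
    import java.net.URLClassLoader;
    import org.apache.samza.util.SplitDeploymentUtil;

    public class SplitDeploymentExample {
      // Stand-in for the real runner class; illustrative only.
      public static class SomeRunner {
        public static void main(String[] args) {
          System.out.println("running with context classloader "
              + Thread.currentThread().getContextClassLoader());
        }
      }

      public static void main(String[] args) throws Exception {
        // In a real split deployment this loader would point at the framework jars (with an
        // isolating parent); an empty URL array keeps this sketch self-contained and runnable.
        URLClassLoader loader =
            new URLClassLoader(new URL[0], SplitDeploymentExample.class.getClassLoader());

        // Re-invokes SomeRunner.main(args) through the given loader, swapping the context
        // classloader around the call exactly as the utility above does.
        SplitDeploymentUtil.runWithClassLoader(loader, SomeRunner.class, "main", args);
      }
    }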
diff --git a/samza-core/src/main/java/org/apache/samza/util/Util.java b/samza-core/src/main/java/org/apache/samza/util/Util.java
index 875b6da..f233c32 100644
--- a/samza-core/src/main/java/org/apache/samza/util/Util.java
+++ b/samza-core/src/main/java/org/apache/samza/util/Util.java
@@ -45,14 +45,17 @@
    * Make an environment variable string safe to pass.
    */
   public static String envVarEscape(String str) {
-    return str.replace("\"", "\\\"").replace("'", "\\'");
+    return str
+        .replace("\\", "\\\\")
+        .replace("\"", "\\\"")
+        .replace("`", "\\`");
   }
 
   public static String getSamzaVersion() {
     return Optional.ofNullable(Util.class.getPackage().getImplementationVersion()).orElseGet(() -> {
-        LOG.warn("Unable to find implementation samza version in jar's meta info. Defaulting to {}", FALLBACK_VERSION);
-        return FALLBACK_VERSION;
-      });
+      LOG.warn("Unable to find implementation samza version in jar's meta info. Defaulting to {}", FALLBACK_VERSION);
+      return FALLBACK_VERSION;
+    });
   }
 
   public static String getTaskClassVersion(Config config) {
diff --git a/samza-core/src/main/java/org/apache/samza/zk/ZkBarrierForVersionUpgrade.java b/samza-core/src/main/java/org/apache/samza/zk/ZkBarrierForVersionUpgrade.java
index 51f5555..b951b6e 100644
--- a/samza-core/src/main/java/org/apache/samza/zk/ZkBarrierForVersionUpgrade.java
+++ b/samza-core/src/main/java/org/apache/samza/zk/ZkBarrierForVersionUpgrade.java
@@ -194,17 +194,17 @@
       // check if all the expected participants are in
       if (participantIds.size() == expectedParticipantIds.size() && CollectionUtils.containsAll(participantIds, expectedParticipantIds)) {
         debounceTimer.scheduleAfterDebounceTime(ACTION_NAME, 0, () -> {
-            String barrierStatePath = keyBuilder.getBarrierStatePath(barrierVersion);
-            State barrierState = State.valueOf(zkUtils.getZkClient().readData(barrierStatePath));
-            if (Objects.equals(barrierState, State.NEW)) {
-              LOG.info(String.format("Expected participants has joined the barrier version: %s. Marking the barrier state: %s as %s.", barrierVersion, barrierStatePath, State.DONE));
-              zkUtils.writeData(barrierStatePath, State.DONE.toString()); // this will trigger notifications
-            } else {
-              LOG.debug(String.format("Barrier version: %s is at: %s state. Not marking barrier as %s.", barrierVersion, barrierState, State.DONE));
-            }
-            LOG.info("Unsubscribing child changes on the path: {} for barrier version: {}.", barrierParticipantPath, barrierVersion);
-            zkUtils.unsubscribeChildChanges(barrierParticipantPath, this);
-          });
+          String barrierStatePath = keyBuilder.getBarrierStatePath(barrierVersion);
+          State barrierState = State.valueOf(zkUtils.getZkClient().readData(barrierStatePath));
+          if (Objects.equals(barrierState, State.NEW)) {
+            LOG.info(String.format("Expected participants has joined the barrier version: %s. Marking the barrier state: %s as %s.", barrierVersion, barrierStatePath, State.DONE));
+            zkUtils.writeData(barrierStatePath, State.DONE.toString()); // this will trigger notifications
+          } else {
+            LOG.debug(String.format("Barrier version: %s is at: %s state. Not marking barrier as %s.", barrierVersion, barrierState, State.DONE));
+          }
+          LOG.info("Unsubscribing child changes on the path: {} for barrier version: {}.", barrierParticipantPath, barrierVersion);
+          zkUtils.unsubscribeChildChanges(barrierParticipantPath, this);
+        });
       }
     }
   }
diff --git a/samza-core/src/main/java/org/apache/samza/zk/ZkJobCoordinator.java b/samza-core/src/main/java/org/apache/samza/zk/ZkJobCoordinator.java
index 86c1f06..e03ce8b 100644
--- a/samza-core/src/main/java/org/apache/samza/zk/ZkJobCoordinator.java
+++ b/samza-core/src/main/java/org/apache/samza/zk/ZkJobCoordinator.java
@@ -142,9 +142,9 @@
     this.debounceTimeMs = new JobConfig(config).getDebounceTimeMs();
     debounceTimer = new ScheduleAfterDebounceTime(processorId);
     debounceTimer.setScheduledTaskCallback(throwable -> {
-        LOG.error("Received exception in debounce timer! Stopping the job coordinator", throwable);
-        stop();
-      });
+      LOG.error("Received exception in debounce timer! Stopping the job coordinator", throwable);
+      stop();
+    });
     this.barrier =  new ZkBarrierForVersionUpgrade(zkUtils.getKeyBuilder().getJobModelVersionBarrierPrefix(), zkUtils, new ZkBarrierListenerImpl(), debounceTimer);
     systemAdmins = new SystemAdmins(config);
     streamMetadataCache = new StreamMetadataCache(systemAdmins, METADATA_CACHE_TTL_MS, SystemClock.instance());
@@ -375,11 +375,11 @@
     Set<SystemStream> inputStreamsToMonitor = new TaskConfig(config).getAllInputStreams();
 
     return new StreamPartitionCountMonitor(
-            inputStreamsToMonitor,
-            streamMetadata,
-            metrics.getMetricsRegistry(),
-            new JobConfig(config).getMonitorPartitionChangeFrequency(),
-            streamsChanged -> {
+        inputStreamsToMonitor,
+        streamMetadata,
+        metrics.getMetricsRegistry(),
+        new JobConfig(config).getMonitorPartitionChangeFrequency(),
+      streamsChanged -> {
         if (leaderElector.amILeader()) {
           debounceTimer.scheduleAfterDebounceTime(ON_PROCESSOR_CHANGE, 0, this::doOnProcessorChange);
         }
@@ -466,20 +466,20 @@
       metrics.singleBarrierRebalancingTime.update(System.nanoTime() - startTime);
       if (ZkBarrierForVersionUpgrade.State.DONE.equals(state)) {
         debounceTimer.scheduleAfterDebounceTime(barrierAction, 0, () -> {
-            LOG.info("pid=" + processorId + "new version " + version + " of the job model got confirmed");
+          LOG.info("pid=" + processorId + "new version " + version + " of the job model got confirmed");
 
-            // read the new Model
-            JobModel jobModel = getJobModel();
-            // start the container with the new model
-            if (coordinatorListener != null) {
-              for (ContainerModel containerModel : jobModel.getContainers().values()) {
-                for (TaskName taskName : containerModel.getTasks().keySet()) {
-                  zkUtils.writeTaskLocality(taskName, locationId);
-                }
+          // read the new Model
+          JobModel jobModel = getJobModel();
+          // start the container with the new model
+          if (coordinatorListener != null) {
+            for (ContainerModel containerModel : jobModel.getContainers().values()) {
+              for (TaskName taskName : containerModel.getTasks().keySet()) {
+                zkUtils.writeTaskLocality(taskName, locationId);
               }
-              coordinatorListener.onNewJobModel(processorId, jobModel);
             }
-          });
+            coordinatorListener.onNewJobModel(processorId, jobModel);
+          }
+        });
       } else {
         if (ZkBarrierForVersionUpgrade.State.TIMED_OUT.equals(state)) {
           // no-op for non-leaders
@@ -539,26 +539,26 @@
     @Override
     public void doHandleDataChange(String dataPath, Object data) {
       debounceTimer.scheduleAfterDebounceTime(JOB_MODEL_VERSION_CHANGE, 0, () -> {
-          String jobModelVersion = (String) data;
+        String jobModelVersion = (String) data;
 
-          LOG.info("Got a notification for new JobModel version. Path = {} Version = {}", dataPath, data);
+        LOG.info("Got a notification for new JobModel version. Path = {} Version = {}", dataPath, data);
 
-          newJobModel = readJobModelFromMetadataStore(jobModelVersion);
-          LOG.info("pid=" + processorId + ": new JobModel is available. Version =" + jobModelVersion + "; JobModel = " + newJobModel);
+        newJobModel = readJobModelFromMetadataStore(jobModelVersion);
+        LOG.info("pid=" + processorId + ": new JobModel is available. Version =" + jobModelVersion + "; JobModel = " + newJobModel);
 
-          if (!newJobModel.getContainers().containsKey(processorId)) {
-            LOG.info("New JobModel does not contain pid={}. Stopping this processor. New JobModel: {}",
-                processorId, newJobModel);
-            stop();
-          } else {
-            // stop current work
-            if (coordinatorListener != null) {
-              coordinatorListener.onJobModelExpired();
-            }
-            // update ZK and wait for all the processors to get this new version
-            barrier.join(jobModelVersion, processorId);
+        if (!newJobModel.getContainers().containsKey(processorId)) {
+          LOG.info("New JobModel does not contain pid={}. Stopping this processor. New JobModel: {}",
+              processorId, newJobModel);
+          stop();
+        } else {
+          // stop current work
+          if (coordinatorListener != null) {
+            coordinatorListener.onJobModelExpired();
           }
-        });
+          // update ZK and wait for all the processors to get this new version
+          barrier.join(jobModelVersion, processorId);
+        }
+      });
     }
 
     @Override
@@ -607,10 +607,10 @@
           LOG.info("Cancelling all scheduled actions in session expiration for processorId: {}.", processorId);
           debounceTimer.cancelAllScheduledActions();
           debounceTimer.scheduleAfterDebounceTime(ZK_SESSION_EXPIRED, 0, () -> {
-              if (coordinatorListener != null) {
-                coordinatorListener.onJobModelExpired();
-              }
-            });
+            if (coordinatorListener != null) {
+              coordinatorListener.onJobModelExpired();
+            }
+          });
 
           return;
         case Disconnected:
diff --git a/samza-core/src/main/scala/org/apache/samza/checkpoint/CheckpointTool.scala b/samza-core/src/main/scala/org/apache/samza/checkpoint/CheckpointTool.scala
index 4db807e..7936c0a 100644
--- a/samza-core/src/main/scala/org/apache/samza/checkpoint/CheckpointTool.scala
+++ b/samza-core/src/main/scala/org/apache/samza/checkpoint/CheckpointTool.scala
@@ -20,7 +20,6 @@
 package org.apache.samza.checkpoint
 
 import java.io.FileInputStream
-import java.net.URI
 import java.util
 import java.util.Properties
 import java.util.regex.Pattern
@@ -82,12 +81,12 @@
   type TaskNameToCheckpointMap = Map[TaskName, Map[SystemStreamPartition, String]]
 
   class CheckpointToolCommandLine extends CommandLine with Logging {
-    val newOffsetsOpt: ArgumentAcceptingOptionSpec[URI] =
-      parser.accepts("new-offsets", "URI of file (e.g. file:///some/local/path.properties) " +
+    val newOffsetsOpt: ArgumentAcceptingOptionSpec[String] =
+      parser.accepts("new-offsets", "Location of file (e.g. /some/local/path.properties) " +
                                     "containing offsets to write to the job's checkpoint topic. " +
                                     "If not given, this tool prints out the current offsets.")
             .withRequiredArg
-            .ofType(classOf[URI])
+            .ofType(classOf[String])
             .describedAs("path")
 
     var newOffsets: TaskNameToCheckpointMap = _
@@ -121,7 +120,7 @@
     override def loadConfig(options: OptionSet): Config = {
       val config = super.loadConfig(options)
       if (options.has(newOffsetsOpt)) {
-        val newOffsetsInputStream = new FileInputStream(options.valueOf(newOffsetsOpt).getPath)
+        val newOffsetsInputStream = new FileInputStream(options.valueOf(newOffsetsOpt))
         val properties = new Properties()
 
         properties.load(newOffsetsInputStream)
diff --git a/samza-core/src/main/scala/org/apache/samza/container/SamzaContainer.scala b/samza-core/src/main/scala/org/apache/samza/container/SamzaContainer.scala
index 6fab351..83ce3a1 100644
--- a/samza-core/src/main/scala/org/apache/samza/container/SamzaContainer.scala
+++ b/samza-core/src/main/scala/org/apache/samza/container/SamzaContainer.scala
@@ -52,6 +52,7 @@
 import org.apache.samza.SamzaException
 import org.apache.samza.clustermanager.StandbyTaskUtil
 
+import scala.collection.JavaConversions
 import scala.collection.JavaConverters._
 
 object SamzaContainer extends Logging {
@@ -587,7 +588,7 @@
           offsetManager = offsetManager,
           storageManager = storageManager,
           tableManager = tableManager,
-          systemStreamPartitions = taskSSPs -- taskSideInputSSPs,
+          systemStreamPartitions = JavaConversions.setAsJavaSet(taskSSPs -- taskSideInputSSPs),
           exceptionHandler = TaskInstanceExceptionHandler(taskInstanceMetrics.get(taskName).get, taskConfig),
           jobModel = jobModel,
           streamMetadataCache = streamMetadataCache,
diff --git a/samza-core/src/main/scala/org/apache/samza/container/TaskInstance.scala b/samza-core/src/main/scala/org/apache/samza/container/TaskInstance.scala
index 37aaeff..2ebe465 100644
--- a/samza-core/src/main/scala/org/apache/samza/container/TaskInstance.scala
+++ b/samza-core/src/main/scala/org/apache/samza/container/TaskInstance.scala
@@ -20,7 +20,7 @@
 package org.apache.samza.container
 
 
-import java.util.{Objects, Optional}
+import java.util.{Collections, Objects, Optional}
 import java.util.concurrent.ScheduledExecutorService
 
 import org.apache.samza.SamzaException
@@ -38,7 +38,7 @@
 
 import scala.collection.JavaConversions._
 import scala.collection.JavaConverters._
-import scala.collection.Map
+import scala.collection.{JavaConverters, Map}
 
 class TaskInstance(
   val task: Any,
@@ -47,10 +47,10 @@
   systemAdmins: SystemAdmins,
   consumerMultiplexer: SystemConsumers,
   collector: TaskInstanceCollector,
-  val offsetManager: OffsetManager = new OffsetManager,
+  override val offsetManager: OffsetManager = new OffsetManager,
   storageManager: TaskStorageManager = null,
   tableManager: TableManager = null,
-  val systemStreamPartitions: Set[SystemStreamPartition] = Set(),
+  val systemStreamPartitions: java.util.Set[SystemStreamPartition] = Collections.emptySet(),
   val exceptionHandler: TaskInstanceExceptionHandler = new TaskInstanceExceptionHandler,
   jobModel: JobModel = null,
   streamMetadataCache: StreamMetadataCache = null,
@@ -60,16 +60,16 @@
   containerContext: ContainerContext,
   applicationContainerContextOption: Option[ApplicationContainerContext],
   applicationTaskContextFactoryOption: Option[ApplicationTaskContextFactory[ApplicationTaskContext]],
-  externalContextOption: Option[ExternalContext]) extends Logging {
+  externalContextOption: Option[ExternalContext]) extends Logging with RunLoopTask {
 
   val taskName: TaskName = taskModel.getTaskName
   val isInitableTask = task.isInstanceOf[InitableTask]
-  val isWindowableTask = task.isInstanceOf[WindowableTask]
   val isEndOfStreamListenerTask = task.isInstanceOf[EndOfStreamListenerTask]
   val isClosableTask = task.isInstanceOf[ClosableTask]
-  val isAsyncTask = task.isInstanceOf[AsyncStreamTask]
 
-  val epochTimeScheduler: EpochTimeScheduler = EpochTimeScheduler.create(timerExecutor)
+  override val isWindowableTask = task.isInstanceOf[WindowableTask]
+
+  override val epochTimeScheduler: EpochTimeScheduler = EpochTimeScheduler.create(timerExecutor)
 
   private val kvStoreSupplier = ScalaJavaUtil.toJavaFunction(
     (storeName: String) => {
@@ -99,7 +99,7 @@
   private val config: Config = jobContext.getConfig
 
   val streamConfig: StreamConfig = new StreamConfig(config)
-  val intermediateStreams: Set[String] = streamConfig.getStreamIds.filter(streamConfig.getIsIntermediateStream).toSet
+  override val intermediateStreams: java.util.Set[String] = JavaConverters.setAsJavaSetConverter(streamConfig.getStreamIds.filter(streamConfig.getIsIntermediateStream)).asJava
 
   val streamsToDeleteCommittedMessages: Set[String] = streamConfig.getStreamIds.filter(streamConfig.getDeleteCommittedMessages).map(streamConfig.getPhysicalName).toSet
 
@@ -165,7 +165,7 @@
   }
 
   def process(envelope: IncomingMessageEnvelope, coordinator: ReadableCoordinator,
-    callbackFactory: TaskCallbackFactory = null) {
+    callbackFactory: TaskCallbackFactory) {
     metrics.processes.inc
 
     val incomingMessageSsp = envelope.getSystemStreamPartition
@@ -181,22 +181,10 @@
       trace("Processing incoming message envelope for taskName and SSP: %s, %s"
         format (taskName, incomingMessageSsp))
 
-      if (isAsyncTask) {
-        exceptionHandler.maybeHandle {
-          val callback = callbackFactory.createCallback()
-          task.asInstanceOf[AsyncStreamTask].processAsync(envelope, collector, coordinator, callback)
-        }
-      } else {
-        exceptionHandler.maybeHandle {
-          task.asInstanceOf[StreamTask].process(envelope, collector, coordinator)
-        }
-
-        trace("Updating offset map for taskName, SSP and offset: %s, %s, %s"
-          format(taskName, incomingMessageSsp, envelope.getOffset))
-
-        offsetManager.update(taskName, incomingMessageSsp, envelope.getOffset)
+      exceptionHandler.maybeHandle {
+        val callback = callbackFactory.createCallback()
+        task.asInstanceOf[AsyncStreamTask].processAsync(envelope, collector, coordinator, callback)
       }
-
     }
   }
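With this change every task is driven through the AsyncStreamTask path, so each processed message must complete a TaskCallback. A minimal async task sketch (the processing body is illustrative):

    import org.apache.samza.system.IncomingMessageEnvelope;
    import org.apache.samza.task.AsyncStreamTask;
    import org.apache.samza.task.MessageCollector;
    import org.apache.samza.task.TaskCallback;
    import org.apache.samza.task.TaskCoordinator;

    public class EchoAsyncTask implements AsyncStreamTask {
      @Override
      public void processAsync(IncomingMessageEnvelope envelope, MessageCollector collector,
          TaskCoordinator coordinator, TaskCallback callback) {
        try {
          // Illustrative processing; a real task would typically send to an output stream here.
          System.out.println("got message with offset " + envelope.getOffset());
          callback.complete();      // tells the run loop this message is done
        } catch (Exception e) {
          callback.failure(e);      // propagates the failure back to the run loop
        }
      }
    }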
 
diff --git a/samza-core/src/main/scala/org/apache/samza/diagnostics/DiagnosticsManager.java b/samza-core/src/main/scala/org/apache/samza/diagnostics/DiagnosticsManager.java
index 9131142..f77dab8 100644
--- a/samza-core/src/main/scala/org/apache/samza/diagnostics/DiagnosticsManager.java
+++ b/samza-core/src/main/scala/org/apache/samza/diagnostics/DiagnosticsManager.java
@@ -142,6 +142,7 @@
     this.autosizingEnabled = autosizingEnabled;
 
     resetTime = Instant.now();
+    this.systemProducer.register(getClass().getSimpleName());
 
     try {
       ReflectionUtil.getObjWithArgs("org.apache.samza.logging.log4j.SimpleDiagnosticsAppender",
@@ -161,6 +162,7 @@
   }
 
   public void start() {
+    this.systemProducer.start();
     this.scheduler.scheduleWithFixedDelay(new DiagnosticsStreamPublisher(), 0, DEFAULT_PUBLISH_PERIOD.getSeconds(),
         TimeUnit.SECONDS);
   }
@@ -175,6 +177,7 @@
       LOG.warn("Unable to terminate scheduler");
       scheduler.shutdownNow();
     }
+    this.systemProducer.stop();
   }
 
   public void addExceptionEvent(DiagnosticsExceptionEvent diagnosticsExceptionEvent) {
diff --git a/samza-core/src/main/scala/org/apache/samza/metrics/reporter/MetricsSnapshotReporter.scala b/samza-core/src/main/scala/org/apache/samza/metrics/reporter/MetricsSnapshotReporter.scala
index f9cf819..7a0d79f 100644
--- a/samza-core/src/main/scala/org/apache/samza/metrics/reporter/MetricsSnapshotReporter.scala
+++ b/samza-core/src/main/scala/org/apache/samza/metrics/reporter/MetricsSnapshotReporter.scala
@@ -43,12 +43,12 @@
  * taskName // container_567890
  * host // eat1-app128.gird
  * version // 0.0.1
-  * blacklist // Regex of metrics to ignore when flushing
+ * blacklist // Regex of metrics to ignore when flushing
  */
 class MetricsSnapshotReporter(
   producer: SystemProducer,
   out: SystemStream,
-  pollingInterval: Int,
+  reportingInterval: Int,
   jobName: String,
   jobId: String,
   containerName: String,
@@ -67,8 +67,8 @@
   var registries = List[(String, ReadableMetricsRegistry)]()
   var blacklistedMetrics = Set[String]()
 
-  info("got metrics snapshot reporter properties [job name: %s, job id: %s, containerName: %s, version: %s, samzaVersion: %s, host: %s, pollingInterval %s]"
-    format (jobName, jobId, containerName, version, samzaVersion, host, pollingInterval))
+  info("got metrics snapshot reporter properties [job name: %s, job id: %s, containerName: %s, version: %s, samzaVersion: %s, host: %s, reportingInterval %s]"
+    format(jobName, jobId, containerName, version, samzaVersion, host, reportingInterval))
 
   def start {
     info("Starting producer.")
@@ -77,7 +77,7 @@
 
     info("Starting reporter timer.")
 
-    executor.scheduleWithFixedDelay(this, 0, pollingInterval, TimeUnit.SECONDS)
+    executor.scheduleWithFixedDelay(this, 0, reportingInterval, TimeUnit.SECONDS)
   }
 
   def register(source: String, registry: ReadableMetricsRegistry) {
@@ -91,7 +91,7 @@
   def stop = {
 
     // Scheduling an event with 0 delay to ensure flushing of metrics one last time before shutdown
-    executor.schedule(this,0, TimeUnit.SECONDS)
+    executor.schedule(this, 0, TimeUnit.SECONDS)
 
     info("Stopping reporter timer.")
     // Allow the scheduled task above to finish, and block for termination (for max 60 seconds)
@@ -106,9 +106,20 @@
     }
   }
 
-  def run {
-    debug("Begin flushing metrics.")
+  def run() {
+    try {
+      innerRun()
+    } catch {
+      case e: Exception =>
+        // Ignore all exceptions - because subsequent executions of this scheduled task will be suppressed
+        // by the executor if the current task throws an unhandled exception.
+        warn("Error while reporting metrics. Will retry in " + reportingInterval + " seconds.", e)
+    }
 
+  }
+
+  def innerRun(): Unit = {
+    debug("Begin flushing metrics.")
     for ((source, registry) <- registries) {
       debug("Flushing metrics for %s." format source)
 
@@ -140,7 +151,7 @@
         val header = new MetricsHeader(jobName, jobId, containerName, execEnvironmentContainerId, source, version, samzaVersion, host, clock(), resetTime)
         val metrics = new Metrics(metricsMsg)
 
-        debug("Flushing metrics for %s to %s with header and map: header=%s, map=%s." format(source, out, header.getAsMap, metrics.getAsMap))
+        debug("Flushing metrics for %s to %s with header and map: header=%s, map=%s." format(source, out, header.getAsMap, metrics.getAsMap()))
 
         val metricsSnapshot = new MetricsSnapshot(header, metrics)
         val maybeSerialized = if (serializer != null) {
@@ -160,12 +171,9 @@
         }
       }
     }
-
-
     debug("Finished flushing metrics.")
   }
 
-
   def shouldIgnore(group: String, metricName: String) = {
     var isBlacklisted = blacklist.isDefined
     val fullMetricName = group + "." + metricName
@@ -173,7 +181,7 @@
     if (isBlacklisted && !blacklistedMetrics.contains(fullMetricName)) {
       if (fullMetricName.matches(blacklist.get)) {
         blacklistedMetrics += fullMetricName
-        info("Blacklisted metric %s because it matched blacklist regex: %s" format(fullMetricName, blacklist.get))
+        debug("Blacklisted metric %s because it matched blacklist regex: %s" format(fullMetricName, blacklist.get))
       } else {
         isBlacklisted = false
       }
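
The rewritten run() above delegates to innerRun() inside a try/catch because a ScheduledExecutorService silently suppresses every later execution of a task once one run throws. A minimal standalone Scala sketch of that behavior (object and variable names are illustrative, not part of this patch):

    import java.util.concurrent.{Executors, TimeUnit}

    object ScheduledReporterSketch extends App {
      val executor = Executors.newSingleThreadScheduledExecutor()
      @volatile var attempts = 0

      executor.scheduleWithFixedDelay(new Runnable {
        override def run(): Unit = {
          attempts += 1
          try {
            // Stand-in for innerRun(): assume the first flush fails transiently.
            if (attempts == 1) throw new RuntimeException("transient flush failure")
            println(s"flush #$attempts succeeded")
          } catch {
            // Swallow and log, as MetricsSnapshotReporter.run() now does, so the
            // schedule survives the failure and retries on the next tick.
            case e: Exception => println(s"flush #$attempts failed: ${e.getMessage}")
          }
        }
      }, 0, 100, TimeUnit.MILLISECONDS)

      Thread.sleep(450)
      executor.shutdownNow()
    }

Without the catch, the first failed flush would be the last one the executor ever runs.
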
diff --git a/samza-core/src/main/scala/org/apache/samza/metrics/reporter/MetricsSnapshotReporterFactory.scala b/samza-core/src/main/scala/org/apache/samza/metrics/reporter/MetricsSnapshotReporterFactory.scala
index 441d834..2f9a0ba 100644
--- a/samza-core/src/main/scala/org/apache/samza/metrics/reporter/MetricsSnapshotReporterFactory.scala
+++ b/samza-core/src/main/scala/org/apache/samza/metrics/reporter/MetricsSnapshotReporterFactory.scala
@@ -19,53 +19,47 @@
 
 package org.apache.samza.metrics.reporter
 
-import org.apache.samza.util.{Logging, ReflectionUtil, StreamUtil, Util}
 import org.apache.samza.SamzaException
-import org.apache.samza.config.{Config, JobConfig, MetricsConfig, SerializerConfig, StreamConfig, SystemConfig}
-import org.apache.samza.metrics.MetricsReporter
-import org.apache.samza.metrics.MetricsReporterFactory
-import org.apache.samza.metrics.MetricsRegistryMap
-import org.apache.samza.serializers.{MetricsSnapshotSerdeV2, SerdeFactory}
-import org.apache.samza.system.SystemFactory
+import org.apache.samza.config._
+import org.apache.samza.metrics.{MetricsRegistryMap, MetricsReporter, MetricsReporterFactory}
+import org.apache.samza.serializers.{MetricsSnapshotSerdeV2, Serde, SerdeFactory}
+import org.apache.samza.system.{SystemFactory, SystemProducer, SystemStream}
 import org.apache.samza.util.ScalaJavaUtil.JavaOptionals
+import org.apache.samza.util.{Logging, ReflectionUtil, StreamUtil, Util}
 
 class MetricsSnapshotReporterFactory extends MetricsReporterFactory with Logging {
-  def getMetricsReporter(name: String, containerName: String, config: Config): MetricsReporter = {
-    info("Creating new metrics snapshot reporter.")
 
-    val jobConfig = new JobConfig(config)
-    val jobName = JavaOptionals.toRichOptional(jobConfig.getName).toOption
-      .getOrElse(throw new SamzaException("Job name must be defined in config."))
-    val jobId = jobConfig.getJobId
-
-    val metricsConfig = new MetricsConfig(config)
-    val metricsSystemStreamName = JavaOptionals.toRichOptional(metricsConfig.getMetricsSnapshotReporterStream(name))
-      .toOption
-      .getOrElse(throw new SamzaException("No metrics stream defined in config."))
-
-    val systemStream = StreamUtil.getSystemStreamFromNames(metricsSystemStreamName)
-
-    info("Got system stream %s." format systemStream)
-
-    val systemName = systemStream.getSystem
-
+  protected def getProducer(reporterName: String, config: Config, registry: MetricsRegistryMap): SystemProducer = {
     val systemConfig = new SystemConfig(config)
+    val systemName = getSystemStream(reporterName, config).getSystem
     val systemFactoryClassName = JavaOptionals.toRichOptional(systemConfig.getSystemFactory(systemName)).toOption
       .getOrElse(throw new SamzaException("Trying to fetch system factory for system %s, which isn't defined in config." format systemName))
-
     val systemFactory = ReflectionUtil.getObj(systemFactoryClassName, classOf[SystemFactory])
 
     info("Got system factory %s." format systemFactory)
-
-    val registry = new MetricsRegistryMap
-
     val producer = systemFactory.getProducer(systemName, config, registry)
-
     info("Got producer %s." format producer)
+
+    producer
+  }
+
+  protected def getSystemStream(reporterName: String, config: Config): SystemStream = {
+    val metricsConfig = new MetricsConfig(config)
+    val metricsSystemStreamName = JavaOptionals.toRichOptional(metricsConfig.getMetricsSnapshotReporterStream(reporterName))
+      .toOption
+      .getOrElse(throw new SamzaException("No metrics stream defined in config."))
+    val systemStream = StreamUtil.getSystemStreamFromNames(metricsSystemStreamName)
+    info("Got system stream %s." format systemStream)
+    systemStream
+  }
+
+  protected def getSerde(reporterName: String, config: Config): Serde[MetricsSnapshot] = {
     val streamConfig = new StreamConfig(config)
+    val systemConfig = new SystemConfig(config)
+    val systemStream = getSystemStream(reporterName, config)
 
     val streamSerdeName = streamConfig.getStreamMsgSerde(systemStream)
-    val systemSerdeName = systemConfig.getSystemMsgSerde(systemName)
+    val systemSerdeName = systemConfig.getSystemMsgSerde(systemStream.getSystem)
     val serdeName = streamSerdeName.orElse(systemSerdeName.orElse(null))
     val serializerConfig = new SerializerConfig(config)
     val serde = if (serdeName != null) {
@@ -77,29 +71,62 @@
     } else {
       new MetricsSnapshotSerdeV2
     }
-
     info("Got serde %s." format serde)
+    serde
+  }
 
-    val pollingInterval: Int = metricsConfig.getMetricsSnapshotReporterInterval(name)
 
-    info("Setting polling interval to %d" format pollingInterval)
+  protected def getBlacklist(reporterName: String, config: Config): Option[String] = {
+    val metricsConfig = new MetricsConfig(config)
+    val blacklist = JavaOptionals.toRichOptional(metricsConfig.getMetricsSnapshotReporterBlacklist(reporterName)).toOption
+    info("Got blacklist as: %s" format blacklist)
+    blacklist
+  }
 
-    val blacklist = JavaOptionals.toRichOptional(metricsConfig.getMetricsSnapshotReporterBlacklist(name)).toOption
-    info("Setting blacklist to %s" format blacklist)
+  protected def getReportingInterval(reporterName: String, config: Config): Int = {
+    val metricsConfig = new MetricsConfig(config)
+    val reportingInterval = metricsConfig.getMetricsSnapshotReporterInterval(reporterName)
+    info("Got reporting interval: %d" format reportingInterval)
+    reportingInterval
+  }
+
+  protected def getJobId(config: Config): String = {
+    val jobConfig = new JobConfig(config)
+    jobConfig.getJobId
+  }
+
+  protected def getJobName(config: Config): String = {
+    val jobConfig = new JobConfig(config)
+    JavaOptionals.toRichOptional(jobConfig.getName).toOption
+      .getOrElse(throw new SamzaException("Job name must be defined in config."))
+  }
+
+  def getMetricsReporter(reporterName: String, containerName: String, config: Config): MetricsReporter = {
+    info("Creating new metrics snapshot reporter.")
+    val registry = new MetricsRegistryMap
+
+    val systemStream = getSystemStream(reporterName, config)
+    val producer = getProducer(reporterName, config, registry)
+    val reportingInterval = getReportingInterval(reporterName, config)
+    val jobName = getJobName(config)
+    val jobId = getJobId(config)
+    val serde = getSerde(reporterName, config)
+    val blacklist = getBlacklist(reporterName, config)
 
     val reporter = new MetricsSnapshotReporter(
       producer,
       systemStream,
-      pollingInterval,
+      reportingInterval,
       jobName,
       jobId,
       containerName,
       Util.getTaskClassVersion(config),
-      Util.getSamzaVersion(),
+      Util.getSamzaVersion,
       Util.getLocalHost.getHostName,
       serde, blacklist)
 
-    reporter.register(this.getClass.getSimpleName.toString, registry)
+    reporter.register(this.getClass.getSimpleName, registry)
 
     reporter
   }
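
The factory refactor above splits getMetricsReporter into protected getters. One plausible use, sketched below with a hypothetical subclass name, is overriding a single dependency (here the reporting interval) without re-implementing the rest of the wiring:

    import org.apache.samza.config.Config
    import org.apache.samza.metrics.reporter.MetricsSnapshotReporterFactory

    // Hypothetical subclass, not part of this patch: reuses the factory's
    // producer/serde/stream wiring but pins the reporting interval.
    class FixedIntervalReporterFactory extends MetricsSnapshotReporterFactory {
      // Report every 5 seconds regardless of what the metrics config says.
      override protected def getReportingInterval(reporterName: String, config: Config): Int = 5
    }
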
diff --git a/samza-core/src/main/scala/org/apache/samza/storage/ContainerStorageManager.java b/samza-core/src/main/scala/org/apache/samza/storage/ContainerStorageManager.java
index 8623e5d..19411b4 100644
--- a/samza-core/src/main/scala/org/apache/samza/storage/ContainerStorageManager.java
+++ b/samza-core/src/main/scala/org/apache/samza/storage/ContainerStorageManager.java
@@ -149,10 +149,10 @@
   private final int maxChangeLogStreamPartitions; // The partition count of each changelog-stream topic. This is used for validating changelog streams before restoring.
 
   /* Sideinput related parameters */
-  private final Map<String, Set<SystemStream>> sideInputSystemStreams; // Map of sideInput system-streams indexed by store name
-  private final Map<TaskName, Map<String, Set<SystemStreamPartition>>> taskSideInputSSPs;
-  private final Map<SystemStreamPartition, TaskSideInputStorageManager> sideInputStorageManagers; // Map of sideInput storageManagers indexed by ssp, for simpler lookup for process()
-  private final Map<String, SystemConsumer> sideInputConsumers; // Mapping from storeSystemNames to SystemConsumers
+  private final boolean hasSideInputs;
+  // side inputs indexed first by task, then store name
+  private final Map<TaskName, Map<String, Set<SystemStreamPartition>>> taskSideInputStoreSSPs;
+  private final Map<SystemStreamPartition, TaskSideInputHandler> sspSideInputHandlers;
   private SystemConsumers sideInputSystemConsumers;
   private final Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> initialSideInputSSPMetadata
       = new ConcurrentHashMap<>(); // Recorded sspMetadata of the taskSideInputSSPs recorded at start, used to determine when sideInputs are caughtup and container init can proceed
@@ -194,12 +194,16 @@
       Clock clock) {
     this.checkpointManager = checkpointManager;
     this.containerModel = containerModel;
-    this.sideInputSystemStreams = new HashMap<>(sideInputSystemStreams);
-    this.taskSideInputSSPs = getTaskSideInputSSPs(containerModel, sideInputSystemStreams);
+    this.taskSideInputStoreSSPs = getTaskSideInputSSPs(containerModel, sideInputSystemStreams);
+    this.hasSideInputs = this.taskSideInputStoreSSPs.values().stream()
+        .flatMap(m -> m.values().stream())
+        .flatMap(Collection::stream)
+        .findAny()
+        .isPresent();
     this.sspMetadataCache = sspMetadataCache;
     this.changelogSystemStreams = getChangelogSystemStreams(containerModel, changelogSystemStreams); // handling standby tasks
 
-    LOG.info("Starting with changelogSystemStreams = {} sideInputSystemStreams = {}", this.changelogSystemStreams, this.sideInputSystemStreams);
+    LOG.info("Starting with changelogSystemStreams = {} taskSideInputStoreSSPs = {}", this.changelogSystemStreams, this.taskSideInputStoreSSPs);
 
     this.storageEngineFactories = storageEngineFactories;
     this.serdes = serdes;
@@ -238,25 +242,38 @@
     // create taskStores for all tasks in the containerModel and each store in storageEngineFactories
     this.taskStores = createTaskStores(containerModel, jobContext, containerContext, storageEngineFactories, serdes, taskInstanceMetrics, taskInstanceCollectors);
 
+    Set<String> containerChangelogSystems = this.changelogSystemStreams.values().stream()
+        .map(SystemStream::getSystem)
+        .collect(Collectors.toSet());
+
     // create system consumers (1 per store system in changelogSystemStreams), and index it by storeName
-    Map<String, SystemConsumer> storeSystemConsumers = createConsumers(this.changelogSystemStreams.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey,
-        e -> Collections.singleton(e.getValue()))), systemFactories, config, this.samzaContainerMetrics.registry());
+    Map<String, SystemConsumer> storeSystemConsumers = createConsumers(
+        containerChangelogSystems, systemFactories, config, this.samzaContainerMetrics.registry());
     this.storeConsumers = createStoreIndexedMap(this.changelogSystemStreams, storeSystemConsumers);
 
     // creating task restore managers
     this.taskRestoreManagers = createTaskRestoreManagers(systemAdmins, clock, this.samzaContainerMetrics);
 
-    // create sideInput storage managers
-    sideInputStorageManagers = createSideInputStorageManagers(clock);
-
-    // create sideInput consumers indexed by systemName
-    this.sideInputConsumers = createConsumers(this.sideInputSystemStreams, systemFactories, config, this.samzaContainerMetrics.registry());
+    this.sspSideInputHandlers = createSideInputHandlers(clock);
 
     // create SystemConsumers for consuming from taskSideInputSSPs, if sideInputs are being used
-    if (sideInputsPresent()) {
+    if (this.hasSideInputs) {
+      Set<SystemStream> containerSideInputSystemStreams = this.taskSideInputStoreSSPs.values().stream()
+          .flatMap(map -> map.values().stream())
+          .flatMap(Set::stream)
+          .map(SystemStreamPartition::getSystemStream)
+          .collect(Collectors.toSet());
 
-      scala.collection.immutable.Map<SystemStream, SystemStreamMetadata> inputStreamMetadata = streamMetadataCache.getStreamMetadata(JavaConversions.asScalaSet(
-          this.sideInputSystemStreams.values().stream().flatMap(Set::stream).collect(Collectors.toSet())).toSet(), false);
+      Set<String> containerSideInputSystems = containerSideInputSystemStreams.stream()
+          .map(SystemStream::getSystem)
+          .collect(Collectors.toSet());
+
+      // create sideInput consumers indexed by systemName
+      // Mapping from storeSystemNames to SystemConsumers
+      Map<String, SystemConsumer> sideInputConsumers =
+          createConsumers(containerSideInputSystems, systemFactories, config, this.samzaContainerMetrics.registry());
+
+      scala.collection.immutable.Map<SystemStream, SystemStreamMetadata> inputStreamMetadata = streamMetadataCache.getStreamMetadata(JavaConversions.asScalaSet(containerSideInputSystemStreams).toSet(), false);
 
       SystemConsumersMetrics sideInputSystemConsumersMetrics = new SystemConsumersMetrics(samzaContainerMetrics.registry(), SIDEINPUTS_METRICS_PREFIX);
       // we use the same registry as samza-container-metrics
@@ -265,7 +282,7 @@
           sideInputSystemConsumersMetrics.registry(), systemAdmins);
 
       sideInputSystemConsumers =
-          new SystemConsumers(chooser, ScalaJavaUtil.toScalaMap(this.sideInputConsumers), systemAdmins, serdeManager,
+          new SystemConsumers(chooser, ScalaJavaUtil.toScalaMap(sideInputConsumers), systemAdmins, serdeManager,
               sideInputSystemConsumersMetrics, SystemConsumers.DEFAULT_NO_NEW_MESSAGES_TIMEOUT(), SystemConsumers.DEFAULT_DROP_SERIALIZATION_ERROR(),
               TaskConfig.DEFAULT_POLL_INTERVAL_MS, ScalaJavaUtil.toScalaFunction(() -> System.nanoTime()));
     }
@@ -283,12 +300,12 @@
     Map<TaskName, Map<String, Set<SystemStreamPartition>>> taskSideInputSSPs = new HashMap<>();
 
     containerModel.getTasks().forEach((taskName, taskModel) -> {
-        sideInputSystemStreams.keySet().forEach(storeName -> {
-            Set<SystemStreamPartition> taskSideInputs = taskModel.getSystemStreamPartitions().stream().filter(ssp -> sideInputSystemStreams.get(storeName).contains(ssp.getSystemStream())).collect(Collectors.toSet());
-            taskSideInputSSPs.putIfAbsent(taskName, new HashMap<>());
-            taskSideInputSSPs.get(taskName).put(storeName, taskSideInputs);
-          });
+      taskSideInputSSPs.putIfAbsent(taskName, new HashMap<>());
+      sideInputSystemStreams.keySet().forEach(storeName -> {
+        Set<SystemStreamPartition> taskSideInputs = taskModel.getSystemStreamPartitions().stream().filter(ssp -> sideInputSystemStreams.get(storeName).contains(ssp.getSystemStream())).collect(Collectors.toSet());
+        taskSideInputSSPs.get(taskName).put(storeName, taskSideInputs);
       });
+    });
     return taskSideInputSSPs;
   }
 
@@ -312,14 +329,13 @@
     );
 
     getTasks(containerModel, TaskMode.Standby).forEach((taskName, taskModel) -> {
-        changelogSystemStreams.forEach((storeName, systemStream) -> {
-            SystemStreamPartition ssp = new SystemStreamPartition(systemStream, taskModel.getChangelogPartition());
-            changelogSSPToStore.remove(ssp);
-            this.taskSideInputSSPs.putIfAbsent(taskName, new HashMap<>());
-            this.sideInputSystemStreams.put(storeName, Collections.singleton(ssp.getSystemStream()));
-            this.taskSideInputSSPs.get(taskName).put(storeName, Collections.singleton(ssp));
-          });
+      this.taskSideInputStoreSSPs.putIfAbsent(taskName, new HashMap<>());
+      changelogSystemStreams.forEach((storeName, systemStream) -> {
+        SystemStreamPartition ssp = new SystemStreamPartition(systemStream, taskModel.getChangelogPartition());
+        changelogSSPToStore.remove(ssp);
+        this.taskSideInputStoreSSPs.get(taskName).put(storeName, Collections.singleton(ssp));
       });
+    });
 
     // changelogSystemStreams correspond only to active tasks (since those of standby-tasks moved to sideInputs above)
     return MapUtils.invertMap(changelogSSPToStore).entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, x -> x.getValue().getSystemStream()));
@@ -329,28 +345,21 @@
   /**
    *  Creates SystemConsumer objects for store restoration, creating one consumer per system.
    */
-  private static Map<String, SystemConsumer> createConsumers(Map<String, Set<SystemStream>> systemStreams,
+  private static Map<String, SystemConsumer> createConsumers(Set<String> storeSystems,
       Map<String, SystemFactory> systemFactories, Config config, MetricsRegistry registry) {
-    // Determine the set of systems being used across all stores
-    Set<String> storeSystems =
-        systemStreams.values().stream().flatMap(Set::stream).map(SystemStream::getSystem).collect(Collectors.toSet());
-
     // Create one consumer for each system in use, map with one entry for each such system
-    Map<String, SystemConsumer> storeSystemConsumers = new HashMap<>();
-
+    Map<String, SystemConsumer> consumers = new HashMap<>();
 
     // Iterate over the list of storeSystems and create one sysConsumer per system
     for (String storeSystemName : storeSystems) {
       SystemFactory systemFactory = systemFactories.get(storeSystemName);
       if (systemFactory == null) {
-        throw new SamzaException("Changelog system " + storeSystemName + " does not exist in config");
+        throw new SamzaException("System " + storeSystemName + " does not exist in config");
       }
-      storeSystemConsumers.put(storeSystemName,
-          systemFactory.getConsumer(storeSystemName, config, registry));
+      consumers.put(storeSystemName, systemFactory.getConsumer(storeSystemName, config, registry));
     }
 
-    return storeSystemConsumers;
-
+    return consumers;
   }
 
   private static Map<String, SystemConsumer> createStoreIndexedMap(Map<String, SystemStream> changelogSystemStreams,
@@ -368,13 +377,13 @@
   private Map<TaskName, TaskRestoreManager> createTaskRestoreManagers(SystemAdmins systemAdmins, Clock clock, SamzaContainerMetrics samzaContainerMetrics) {
     Map<TaskName, TaskRestoreManager> taskRestoreManagers = new HashMap<>();
     containerModel.getTasks().forEach((taskName, taskModel) -> {
-        taskRestoreManagers.put(taskName,
-            TaskRestoreManagerFactory.create(
-                taskModel, changelogSystemStreams, getNonSideInputStores(taskName), systemAdmins,
-                streamMetadataCache, sspMetadataCache, storeConsumers, maxChangeLogStreamPartitions,
-                loggedStoreBaseDirectory, nonLoggedStoreBaseDirectory, config, clock));
-        samzaContainerMetrics.addStoresRestorationGauge(taskName);
-      });
+      taskRestoreManagers.put(taskName,
+          TaskRestoreManagerFactory.create(
+              taskModel, changelogSystemStreams, getNonSideInputStores(taskName), systemAdmins,
+              streamMetadataCache, sspMetadataCache, storeConsumers, maxChangeLogStreamPartitions,
+              loggedStoreBaseDirectory, nonLoggedStoreBaseDirectory, config, clock));
+      samzaContainerMetrics.addStoresRestorationGauge(taskName);
+    });
     return taskRestoreManagers;
   }
 
@@ -406,7 +415,7 @@
 
       for (String storeName : storageEngineFactories.keySet()) {
 
-        StorageEngineFactory.StoreMode storeMode = this.sideInputSystemStreams.containsKey(storeName) ?
+        StorageEngineFactory.StoreMode storeMode = this.taskSideInputStoreSSPs.get(taskName).containsKey(storeName) ?
             StorageEngineFactory.StoreMode.ReadWrite : StorageEngineFactory.StoreMode.BulkLoad;
 
         StorageEngine storageEngine =
@@ -476,7 +485,7 @@
     // Use the logged-store-base-directory for change logged stores and sideInput stores, and non-logged-store-base-dir
     // for non logged stores
     File storeDirectory;
-    if (changeLogSystemStreamPartition != null || sideInputSystemStreams.containsKey(storeName)) {
+    if (changeLogSystemStreamPartition != null || this.taskSideInputStoreSSPs.get(taskName).containsKey(storeName)) {
       storeDirectory = storageManagerUtil.getTaskStoreDir(this.loggedStoreBaseDirectory, storeName, taskName,
           taskModel.getTaskMode());
     } else {
@@ -522,136 +531,139 @@
 
   // Create sideInput store processors, one per store per task
   private Map<TaskName, Map<String, SideInputsProcessor>> createSideInputProcessors(StorageConfig config,
-      ContainerModel containerModel, Map<String, Set<SystemStream>> sideInputSystemStreams,
-      Map<TaskName, TaskInstanceMetrics> taskInstanceMetrics) {
+      ContainerModel containerModel, Map<TaskName, TaskInstanceMetrics> taskInstanceMetrics) {
 
     Map<TaskName, Map<String, SideInputsProcessor>> sideInputStoresToProcessors = new HashMap<>();
     containerModel.getTasks().forEach((taskName, taskModel) -> {
-        sideInputStoresToProcessors.put(taskName, new HashMap<>());
-        TaskMode taskMode = taskModel.getTaskMode();
+      sideInputStoresToProcessors.put(taskName, new HashMap<>());
+      TaskMode taskMode = taskModel.getTaskMode();
 
-        for (String storeName : sideInputSystemStreams.keySet()) {
+      for (String storeName : this.taskSideInputStoreSSPs.get(taskName).keySet()) {
 
-          SideInputsProcessor sideInputsProcessor;
-          Optional<String> sideInputsProcessorSerializedInstance =
-              config.getSideInputsProcessorSerializedInstance(storeName);
+        SideInputsProcessor sideInputsProcessor;
+        Optional<String> sideInputsProcessorSerializedInstance =
+            config.getSideInputsProcessorSerializedInstance(storeName);
 
-          if (sideInputsProcessorSerializedInstance.isPresent()) {
+        if (sideInputsProcessorSerializedInstance.isPresent()) {
 
-            sideInputsProcessor = SerdeUtils.deserialize("Side Inputs Processor", sideInputsProcessorSerializedInstance.get());
-            LOG.info("Using serialized side-inputs-processor for store: {}, task: {}", storeName, taskName);
+          sideInputsProcessor = SerdeUtils.deserialize("Side Inputs Processor", sideInputsProcessorSerializedInstance.get());
+          LOG.info("Using serialized side-inputs-processor for store: {}, task: {}", storeName, taskName);
 
-          } else if (config.getSideInputsProcessorFactory(storeName).isPresent()) {
-            String sideInputsProcessorFactoryClassName = config.getSideInputsProcessorFactory(storeName).get();
-            SideInputsProcessorFactory sideInputsProcessorFactory =
-                ReflectionUtil.getObj(sideInputsProcessorFactoryClassName, SideInputsProcessorFactory.class);
-            sideInputsProcessor = sideInputsProcessorFactory.getSideInputsProcessor(config, taskInstanceMetrics.get(taskName).registry());
-            LOG.info("Using side-inputs-processor from factory: {} for store: {}, task: {}", config.getSideInputsProcessorFactory(storeName).get(), storeName, taskName);
+        } else if (config.getSideInputsProcessorFactory(storeName).isPresent()) {
+          String sideInputsProcessorFactoryClassName = config.getSideInputsProcessorFactory(storeName).get();
+          SideInputsProcessorFactory sideInputsProcessorFactory =
+              ReflectionUtil.getObj(sideInputsProcessorFactoryClassName, SideInputsProcessorFactory.class);
+          sideInputsProcessor = sideInputsProcessorFactory.getSideInputsProcessor(config, taskInstanceMetrics.get(taskName).registry());
+          LOG.info("Using side-inputs-processor from factory: {} for store: {}, task: {}", config.getSideInputsProcessorFactory(storeName).get(), storeName, taskName);
 
-          } else {
-            // if this is a active-task with a side-input store but no sideinput-processor-factory defined in config, we rely on upstream validations to fail the deploy
+        } else {
+          // if this is an active-task with a side-input store but no sideinput-processor-factory defined in config, we rely on upstream validations to fail the deploy
 
-            // if this is a standby-task and the store is a non-side-input changelog store
-            // we creating identity sideInputProcessor for stores of standbyTasks
-            // have to use the right serde because the sideInput stores are created
+          // if this is a standby-task and the store is a non-side-input changelog store,
+          // we create an identity sideInputProcessor for the standby task's stores,
+          // using the store's configured key and msg serdes
 
-            Serde keySerde = serdes.get(config.getStorageKeySerde(storeName)
-                .orElseThrow(() -> new SamzaException("Could not find storage key serde for store: " + storeName)));
-            Serde msgSerde = serdes.get(config.getStorageMsgSerde(storeName)
-                .orElseThrow(() -> new SamzaException("Could not find storage msg serde for store: " + storeName)));
-            sideInputsProcessor = new SideInputsProcessor() {
-              @Override
-              public Collection<Entry<?, ?>> process(IncomingMessageEnvelope message, KeyValueStore store) {
-                // Ignore message if the key is null
-                if (message.getKey() == null) {
-                  return ImmutableList.of();
-                } else {
-                  // Skip serde if the message is null
-                  return ImmutableList.of(new Entry<>(keySerde.fromBytes((byte[]) message.getKey()),
-                      message.getMessage() == null ? null : msgSerde.fromBytes((byte[]) message.getMessage())));
-                }
+          Serde keySerde = serdes.get(config.getStorageKeySerde(storeName)
+              .orElseThrow(() -> new SamzaException("Could not find storage key serde for store: " + storeName)));
+          Serde msgSerde = serdes.get(config.getStorageMsgSerde(storeName)
+              .orElseThrow(() -> new SamzaException("Could not find storage msg serde for store: " + storeName)));
+          sideInputsProcessor = new SideInputsProcessor() {
+            @Override
+            public Collection<Entry<?, ?>> process(IncomingMessageEnvelope message, KeyValueStore store) {
+              // Ignore message if the key is null
+              if (message.getKey() == null) {
+                return ImmutableList.of();
+              } else {
+                // Skip serde if the message is null
+                return ImmutableList.of(new Entry<>(keySerde.fromBytes((byte[]) message.getKey()),
+                    message.getMessage() == null ? null : msgSerde.fromBytes((byte[]) message.getMessage())));
               }
-            };
-            LOG.info("Using identity side-inputs-processor for store: {}, task: {}", storeName, taskName);
-          }
-
-          sideInputStoresToProcessors.get(taskName).put(storeName, sideInputsProcessor);
+            }
+          };
+          LOG.info("Using identity side-inputs-processor for store: {}, task: {}", storeName, taskName);
         }
-      });
+
+        sideInputStoresToProcessors.get(taskName).put(storeName, sideInputsProcessor);
+      }
+    });
 
     return sideInputStoresToProcessors;
   }
 
   // Create task sideInput storage managers, one per task, index by the SSP they are responsible for consuming
-  private Map<SystemStreamPartition, TaskSideInputStorageManager> createSideInputStorageManagers(Clock clock) {
+  private Map<SystemStreamPartition, TaskSideInputHandler> createSideInputHandlers(Clock clock) {
     // creating sideInput store processors, one per store per task
     Map<TaskName, Map<String, SideInputsProcessor>> taskSideInputProcessors =
-        createSideInputProcessors(new StorageConfig(config), this.containerModel, this.sideInputSystemStreams,
-            this.taskInstanceMetrics);
+        createSideInputProcessors(new StorageConfig(config), this.containerModel, this.taskInstanceMetrics);
 
-    Map<SystemStreamPartition, TaskSideInputStorageManager> sideInputStorageManagers = new HashMap<>();
+    Map<SystemStreamPartition, TaskSideInputHandler> handlers = new HashMap<>();
 
-    if (sideInputsPresent()) {
+    if (this.hasSideInputs) {
       containerModel.getTasks().forEach((taskName, taskModel) -> {
 
-          Map<String, StorageEngine> sideInputStores = getSideInputStores(taskName);
-          Map<String, Set<SystemStreamPartition>> sideInputStoresToSSPs = new HashMap<>();
+        Map<String, StorageEngine> sideInputStores = getSideInputStores(taskName);
+        Map<String, Set<SystemStreamPartition>> sideInputStoresToSSPs = new HashMap<>();
 
-          for (String storeName : sideInputStores.keySet()) {
-            Set<SystemStreamPartition> storeSSPs = taskSideInputSSPs.get(taskName).get(storeName);
-            sideInputStoresToSSPs.put(storeName, storeSSPs);
-          }
+        for (String storeName : sideInputStores.keySet()) {
+          Set<SystemStreamPartition> storeSSPs = this.taskSideInputStoreSSPs.get(taskName).get(storeName);
+          sideInputStoresToSSPs.put(storeName, storeSSPs);
+        }
 
-          TaskSideInputStorageManager taskSideInputStorageManager =
-              new TaskSideInputStorageManager(taskName, taskModel.getTaskMode(), streamMetadataCache,
-                  loggedStoreBaseDirectory, sideInputStores, taskSideInputProcessors.get(taskName), sideInputStoresToSSPs,
-                  systemAdmins, config, clock);
+        TaskSideInputHandler taskSideInputHandler = new TaskSideInputHandler(taskName,
+            taskModel.getTaskMode(),
+            loggedStoreBaseDirectory,
+            sideInputStores,
+            sideInputStoresToSSPs,
+            taskSideInputProcessors.get(taskName),
+            this.systemAdmins,
+            this.streamMetadataCache,
+            clock);
 
-          sideInputStoresToSSPs.values().stream().flatMap(Set::stream).forEach(ssp -> {
-              sideInputStorageManagers.put(ssp, taskSideInputStorageManager);
-            });
-
-          LOG.info("Created taskSideInputStorageManager for task {}, sideInputStores {} and loggedStoreBaseDirectory {}",
-              taskName, sideInputStores, loggedStoreBaseDirectory);
+        sideInputStoresToSSPs.values().stream().flatMap(Set::stream).forEach(ssp -> {
+          handlers.put(ssp, taskSideInputHandler);
         });
+
+        LOG.info("Created TaskSideInputHandler for task {}, sideInputStores {} and loggedStoreBaseDirectory {}",
+            taskName, sideInputStores, loggedStoreBaseDirectory);
+      });
     }
-    return sideInputStorageManagers;
+    return handlers;
   }
 
   private Map<String, StorageEngine> getSideInputStores(TaskName taskName) {
     return taskStores.get(taskName).entrySet().stream().
-        filter(e -> sideInputSystemStreams.containsKey(e.getKey())).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
+        filter(e -> this.taskSideInputStoreSSPs.get(taskName).containsKey(e.getKey())).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
   }
 
   private Map<String, StorageEngine> getNonSideInputStores(TaskName taskName) {
     return taskStores.get(taskName).entrySet().stream().
-        filter(e -> !sideInputSystemStreams.containsKey(e.getKey())).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
+        filter(e -> !this.taskSideInputStoreSSPs.get(taskName).containsKey(e.getKey())).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
   }
 
-  private Set<TaskSideInputStorageManager> getSideInputStorageManagers() {
-    return this.sideInputStorageManagers.values().stream().collect(Collectors.toSet());
+  private Set<TaskSideInputHandler> getSideInputHandlers() {
+    return this.sspSideInputHandlers.values().stream().collect(Collectors.toSet());
   }
 
   public void start() throws SamzaException, InterruptedException {
     Map<SystemStreamPartition, String> checkpointedChangelogSSPOffsets = new HashMap<>();
     if (new TaskConfig(config).getTransactionalStateRestoreEnabled()) {
       getTasks(containerModel, TaskMode.Active).forEach((taskName, taskModel) -> {
-          if (checkpointManager != null) {
-            Set<SystemStream> changelogSystemStreams = new HashSet<>(this.changelogSystemStreams.values());
-            Checkpoint checkpoint = checkpointManager.readLastCheckpoint(taskName);
-            if (checkpoint != null) {
-              checkpoint.getOffsets().forEach((ssp, offset) -> {
-                  if (changelogSystemStreams.contains(new SystemStream(ssp.getSystem(), ssp.getStream()))) {
-                    checkpointedChangelogSSPOffsets.put(ssp, offset);
-                  }
-                });
-            }
+        if (checkpointManager != null) {
+          Set<SystemStream> changelogSystemStreams = new HashSet<>(this.changelogSystemStreams.values());
+          Checkpoint checkpoint = checkpointManager.readLastCheckpoint(taskName);
+          if (checkpoint != null) {
+            checkpoint.getOffsets().forEach((ssp, offset) -> {
+              if (changelogSystemStreams.contains(new SystemStream(ssp.getSystem(), ssp.getStream()))) {
+                checkpointedChangelogSSPOffsets.put(ssp, offset);
+              }
+            });
           }
-        });
+        }
+      });
     }
     LOG.info("Checkpointed changelog ssp offsets: {}", checkpointedChangelogSSPOffsets);
     restoreStores(checkpointedChangelogSSPOffsets);
-    if (sideInputsPresent()) {
+    if (this.hasSideInputs) {
       startSideInputs();
     }
   }
@@ -676,9 +688,9 @@
 
     // Submit restore callable for each taskInstance
     this.taskRestoreManagers.forEach((taskInstance, taskRestoreManager) -> {
-        taskRestoreFutures.add(executorService.submit(
-            new TaskRestoreCallable(this.samzaContainerMetrics, taskInstance, taskRestoreManager)));
-      });
+      taskRestoreFutures.add(executorService.submit(
+          new TaskRestoreCallable(this.samzaContainerMetrics, taskInstance, taskRestoreManager)));
+    });
 
     // loop-over the future list to wait for each thread to finish, catch any exceptions during restore and throw
     // as samza exceptions
@@ -714,7 +726,7 @@
     LOG.info("SideInput Restore started");
 
     // initialize the sideInputStorageManagers
-    getSideInputStorageManagers().forEach(sideInputStorageManager -> sideInputStorageManager.init());
+    getSideInputHandlers().forEach(TaskSideInputHandler::init);
 
     // start the checkpointing thread at the commit-ms frequency
     TaskConfig taskConfig = new TaskConfig(config);
@@ -722,7 +734,7 @@
       @Override
       public void run() {
         try {
-          getSideInputStorageManagers().forEach(sideInputStorageManager -> sideInputStorageManager.flush());
+          getSideInputHandlers().forEach(TaskSideInputHandler::flush);
         } catch (Exception e) {
           LOG.error("Exception during flushing sideInputs", e);
           sideInputException = e;
@@ -731,20 +743,21 @@
     }, 0, taskConfig.getCommitMs(), TimeUnit.MILLISECONDS);
 
     // set the latch to the number of sideInput SSPs
-    this.sideInputsCaughtUp = new CountDownLatch(this.sideInputStorageManagers.keySet().size());
+    this.sideInputsCaughtUp = new CountDownLatch(this.sspSideInputHandlers.keySet().size());
 
     // register all sideInput SSPs with the consumers
-    for (SystemStreamPartition ssp : sideInputStorageManagers.keySet()) {
-      String startingOffset = sideInputStorageManagers.get(ssp).getStartingOffset(ssp);
+    for (SystemStreamPartition ssp : this.sspSideInputHandlers.keySet()) {
+      String startingOffset = this.sspSideInputHandlers.get(ssp).getStartingOffset(ssp);
 
       if (startingOffset == null) {
-        throw new SamzaException("No offset defined for SideInput SystemStreamPartition : " + ssp);
+        throw new SamzaException(
+            "No starting offset could be obtained for SideInput SystemStreamPartition : " + ssp + ". Consumer cannot start.");
       }
 
       // register startingOffset with the sysConsumer and register a metric for it
       sideInputSystemConsumers.register(ssp, startingOffset);
-      taskInstanceMetrics.get(sideInputStorageManagers.get(ssp).getTaskName()).addOffsetGauge(
-          ssp, ScalaJavaUtil.toScalaFunction(() -> sideInputStorageManagers.get(ssp).getLastProcessedOffset(ssp)));
+      taskInstanceMetrics.get(this.sspSideInputHandlers.get(ssp).getTaskName()).addOffsetGauge(
+          ssp, ScalaJavaUtil.toScalaFunction(() -> this.sspSideInputHandlers.get(ssp).getLastProcessedOffset(ssp)));
 
       SystemStreamMetadata systemStreamMetadata = streamMetadataCache.getSystemStreamMetadata(ssp.getSystemStream(), false);
       SystemStreamMetadata.SystemStreamPartitionMetadata sspMetadata =
@@ -765,26 +778,26 @@
 
     // submit the sideInput read runnable
       sideInputsReadExecutor.submit(() -> {
-          try {
-            while (!shouldShutdown) {
-              IncomingMessageEnvelope envelope = sideInputSystemConsumers.choose(true);
+        try {
+          while (!shouldShutdown) {
+            IncomingMessageEnvelope envelope = sideInputSystemConsumers.choose(true);
 
-              if (envelope != null) {
-                if (!envelope.isEndOfStream()) {
-                  sideInputStorageManagers.get(envelope.getSystemStreamPartition()).process(envelope);
-                }
-
-                checkSideInputCaughtUp(envelope.getSystemStreamPartition(), envelope.getOffset(),
-                    SystemStreamMetadata.OffsetType.NEWEST, envelope.isEndOfStream());
-              } else {
-                LOG.trace("No incoming message was available");
+            if (envelope != null) {
+              if (!envelope.isEndOfStream()) {
+                this.sspSideInputHandlers.get(envelope.getSystemStreamPartition()).process(envelope);
               }
+
+              checkSideInputCaughtUp(envelope.getSystemStreamPartition(), envelope.getOffset(),
+                  SystemStreamMetadata.OffsetType.NEWEST, envelope.isEndOfStream());
+            } else {
+              LOG.trace("No incoming message was available");
             }
-          } catch (Exception e) {
-            LOG.error("Exception in reading sideInputs", e);
-            sideInputException = e;
           }
-        });
+        } catch (Exception e) {
+          LOG.error("Exception in reading sideInputs", e);
+          sideInputException = e;
+        }
+      });
 
       // Make the main thread wait until all sideInputs have been caughtup or an exception was thrown
       while (!shouldShutdown && sideInputException == null &&
@@ -811,10 +824,6 @@
     LOG.info("SideInput Restore complete");
   }
 
-  private boolean sideInputsPresent() {
-    return !this.sideInputSystemStreams.isEmpty();
-  }
-
   // Method to check if the given offset means the stream is caught up for reads
   private void checkSideInputCaughtUp(SystemStreamPartition ssp, String offset, SystemStreamMetadata.OffsetType offsetType, boolean isEndOfStream) {
 
@@ -891,7 +900,7 @@
     this.shouldShutdown = true;
 
     // stop all sideinput consumers and stores
-    if (sideInputsPresent()) {
+    if (this.hasSideInputs) {
       sideInputsReadExecutor.shutdownNow();
 
       this.sideInputSystemConsumers.stop();
@@ -906,7 +915,7 @@
       }
 
       // stop all sideInputStores -- this will perform one last flush on the KV stores, and write the offset file
-      this.getSideInputStorageManagers().forEach(sideInputStorageManager -> sideInputStorageManager.stop());
+      this.getSideInputHandlers().forEach(TaskSideInputHandler::stop);
     }
     LOG.info("Shutdown complete");
   }
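
With the changes above, side inputs are indexed per task and then per store, and the old sideInputSystemStreams field and sideInputsPresent() check are replaced by a boolean derived from that index. A small Scala sketch of the idea, using plain strings in place of TaskName and SystemStreamPartition:

    // Illustrative only: values below are made up.
    object SideInputIndexSketch extends App {
      val taskSideInputStoreSSPs: Map[String, Map[String, Set[String]]] = Map(
        "Partition 0" -> Map("side-input-store" -> Set("kafka.profile-updates.0")),
        "Partition 1" -> Map("side-input-store" -> Set.empty[String]))

      // Equivalent of the new hasSideInputs field: any task with any store
      // that has at least one side-input SSP.
      val hasSideInputs = taskSideInputStoreSSPs.values.exists(_.values.exists(_.nonEmpty))
      println(s"hasSideInputs = $hasSideInputs") // true: Partition 0 has one SSP
    }
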
diff --git a/samza-core/src/main/scala/org/apache/samza/util/CoordinatorStreamUtil.scala b/samza-core/src/main/scala/org/apache/samza/util/CoordinatorStreamUtil.scala
index 518639f..37d1393 100644
--- a/samza-core/src/main/scala/org/apache/samza/util/CoordinatorStreamUtil.scala
+++ b/samza-core/src/main/scala/org/apache/samza/util/CoordinatorStreamUtil.scala
@@ -24,10 +24,11 @@
 
 import org.apache.samza.SamzaException
 import org.apache.samza.config._
-import org.apache.samza.coordinator.metadatastore.{CoordinatorStreamStore, NamespaceAwareCoordinatorStreamStore}
+import org.apache.samza.coordinator.metadatastore.NamespaceAwareCoordinatorStreamStore
 import org.apache.samza.coordinator.stream.{CoordinatorStreamSystemConsumer, CoordinatorStreamSystemProducer, CoordinatorStreamValueSerde}
 import org.apache.samza.coordinator.stream.messages.{Delete, SetConfig}
 import org.apache.samza.job.JobRunner
+import org.apache.samza.metadatastore.MetadataStore
 import org.apache.samza.metrics.MetricsRegistryMap
 import org.apache.samza.system.{StreamSpec, SystemAdmin, SystemAdmins, SystemFactory, SystemStream}
 import org.apache.samza.util.ScalaJavaUtil.JavaOptionals
@@ -44,7 +45,23 @@
     val buildConfigFactory = jobConfig.getCoordinatorStreamFactory
     val coordinatorSystemConfig = Class.forName(buildConfigFactory).newInstance().asInstanceOf[CoordinatorStreamConfigFactory].buildCoordinatorStreamConfig(config)
 
-    new MapConfig(coordinatorSystemConfig);
+    new MapConfig(coordinatorSystemConfig)
+  }
+
+  /**
+   * Creates coordinator stream from config if it does not exist, otherwise no-op.
+   *
+   * @param config to create coordinator stream.
+   */
+  def createCoordinatorStream(config: Config): Unit = {
+    val systemAdmins = new SystemAdmins(config)
+
+    info("Creating coordinator stream")
+    val coordinatorSystemStream = CoordinatorStreamUtil.getCoordinatorSystemStream(config)
+    val coordinatorSystemAdmin = systemAdmins.getSystemAdmin(coordinatorSystemStream.getSystem)
+    coordinatorSystemAdmin.start()
+    CoordinatorStreamUtil.createCoordinatorStream(coordinatorSystemStream, coordinatorSystemAdmin)
+    coordinatorSystemAdmin.stop()
   }
 
   /**
@@ -111,12 +128,29 @@
   }
 
   /**
+   * Reads and returns the launch config persisted in the coordinator stream. Only job.autosizing configs are currently supported.
+   * @param config full job config
+   * @param metadataStore an instance of the instantiated MetadataStore
+   * @return an empty config if autosizing is disabled, otherwise the autosizing-related configs.
+   */
+  def readLaunchConfigFromCoordinatorStream(config: Config, metadataStore: MetadataStore): Config = {
+    if (!config.getBoolean(JobConfig.JOB_AUTOSIZING_ENABLED, false)) {
+      new MapConfig()
+    } else {
+      val config = readConfigFromCoordinatorStream(metadataStore)
+      val launchConfig = config.asScala.filterKeys(key => JobConfig.isAutosizingConfig(key)).asJava
+
+      new MapConfig(launchConfig)
+    }
+  }
+
+  /**
     * Reads and returns the complete configuration stored in the coordinator stream.
-    * @param coordinatorStreamStore an instance of the instantiated {@link CoordinatorStreamStore}.
+    * @param metadataStore an instance of the instantiated {@link MetadataStore}.
     * @return the configuration read from the coordinator stream.
     */
-  def readConfigFromCoordinatorStream(coordinatorStreamStore: CoordinatorStreamStore): Config = {
-    val namespaceAwareCoordinatorStreamStore: NamespaceAwareCoordinatorStreamStore = new NamespaceAwareCoordinatorStreamStore(coordinatorStreamStore, SetConfig.TYPE)
+  def readConfigFromCoordinatorStream(metadataStore: MetadataStore): Config = {
+    val namespaceAwareCoordinatorStreamStore: NamespaceAwareCoordinatorStreamStore = new NamespaceAwareCoordinatorStreamStore(metadataStore, SetConfig.TYPE)
     val configFromCoordinatorStream: util.Map[String, Array[Byte]] = namespaceAwareCoordinatorStreamStore.all
     val configMap: util.Map[String, String] = new util.HashMap[String, String]
     for ((key: String, valueAsBytes: Array[Byte]) <- configFromCoordinatorStream.asScala) {
@@ -136,18 +170,10 @@
   }
 
   def writeConfigToCoordinatorStream(config: Config, resetJobConfig: Boolean = true) {
-    debug("config: %s" format (config))
+    debug("config: %s" format config)
     val coordinatorSystemConsumer = new CoordinatorStreamSystemConsumer(config, new MetricsRegistryMap)
     val coordinatorSystemProducer = new CoordinatorStreamSystemProducer(config, new MetricsRegistryMap)
-    val systemAdmins = new SystemAdmins(config)
-
-    // Create the coordinator stream if it doesn't exist
-    info("Creating coordinator stream")
-    val coordinatorSystemStream = CoordinatorStreamUtil.getCoordinatorSystemStream(config)
-    val coordinatorSystemAdmin = systemAdmins.getSystemAdmin(coordinatorSystemStream.getSystem)
-    coordinatorSystemAdmin.start()
-    CoordinatorStreamUtil.createCoordinatorStream(coordinatorSystemStream, coordinatorSystemAdmin)
-    coordinatorSystemAdmin.stop()
+    CoordinatorStreamUtil.createCoordinatorStream(config)
 
     if (resetJobConfig) {
       info("Storing config in coordinator stream.")
@@ -168,7 +194,7 @@
       val jobConfig = new JobConfig(config)
       if (jobConfig.getAutosizingEnabled) {
         // If autosizing is enabled, we retain auto-sizing related configs
-        keysToRemove = keysToRemove.filter(configKey => !jobConfig.isAutosizingConfig(configKey))
+        keysToRemove = keysToRemove.filter(configKey => !JobConfig.isAutosizingConfig(configKey))
       }
 
       info("Deleting old configs that are no longer defined: %s".format(keysToRemove))
diff --git a/samza-core/src/test/java/org/apache/samza/application/descriptors/TestStreamApplicationDescriptorImpl.java b/samza-core/src/test/java/org/apache/samza/application/descriptors/TestStreamApplicationDescriptorImpl.java
index d1fab98..b4fc75c 100644
--- a/samza-core/src/test/java/org/apache/samza/application/descriptors/TestStreamApplicationDescriptorImpl.java
+++ b/samza-core/src/test/java/org/apache/samza/application/descriptors/TestStreamApplicationDescriptorImpl.java
@@ -87,8 +87,8 @@
     GenericSystemDescriptor sd = new GenericSystemDescriptor("mockSystem", "mockSystemFactoryClass");
     GenericInputDescriptor isd = sd.getInputDescriptor(streamId, mockValueSerde);
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(isd);
-      }, getConfig());
+      appDesc.getInputStream(isd);
+    }, getConfig());
 
     InputOperatorSpec inputOpSpec = streamAppDesc.getInputOperators().get(streamId);
     assertEquals(OpCode.INPUT, inputOpSpec.getOpCode());
@@ -110,8 +110,8 @@
     GenericSystemDescriptor sd = new GenericSystemDescriptor("mockSystem", "mockSystemFactoryClass");
     GenericInputDescriptor isd = sd.getInputDescriptor(streamId, mockKVSerde);
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(isd);
-      }, getConfig());
+      appDesc.getInputStream(isd);
+    }, getConfig());
 
     InputOperatorSpec inputOpSpec = streamAppDesc.getInputOperators().get(streamId);
     assertEquals(OpCode.INPUT, inputOpSpec.getOpCode());
@@ -126,8 +126,8 @@
     GenericSystemDescriptor sd = new GenericSystemDescriptor("mockSystem", "mockSystemFactoryClass");
     GenericInputDescriptor isd = sd.getInputDescriptor("mockStreamId", null);
     new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(isd);
-      }, getConfig());
+      appDesc.getInputStream(isd);
+    }, getConfig());
   }
 
   @Test
@@ -138,8 +138,8 @@
     MockTransformingSystemDescriptor sd = new MockTransformingSystemDescriptor("mockSystem", transformer);
     MockInputDescriptor isd = sd.getInputDescriptor(streamId, mockValueSerde);
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(isd);
-      }, getConfig());
+      appDesc.getInputStream(isd);
+    }, getConfig());
 
     InputOperatorSpec inputOpSpec = streamAppDesc.getInputOperators().get(streamId);
     assertEquals(OpCode.INPUT, inputOpSpec.getOpCode());
@@ -164,8 +164,8 @@
     MockExpandingSystemDescriptor sd = new MockExpandingSystemDescriptor("mock-system", expander);
     MockInputDescriptor isd = sd.getInputDescriptor(streamId, new IntegerSerde());
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(isd);
-      }, getConfig());
+      appDesc.getInputStream(isd);
+    }, getConfig());
 
     InputOperatorSpec inputOpSpec = streamAppDesc.getInputOperators().get(expandedStreamId);
     assertEquals(OpCode.INPUT, inputOpSpec.getOpCode());
@@ -182,8 +182,8 @@
     GenericSystemDescriptor sd = new GenericSystemDescriptor("mockSystem", "mockSystemFactoryClass");
     GenericInputDescriptor isd = sd.getInputDescriptor(streamId, mock(Serde.class));
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(isd);
-      }, getConfig());
+      appDesc.getInputStream(isd);
+    }, getConfig());
 
     InputOperatorSpec inputOpSpec = streamAppDesc.getInputOperators().get(streamId);
     assertEquals(OpCode.INPUT, inputOpSpec.getOpCode());
@@ -200,9 +200,9 @@
     GenericInputDescriptor isd2 = sd.getInputDescriptor(streamId2, mock(Serde.class));
 
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(isd1);
-        appDesc.getInputStream(isd2);
-      }, getConfig());
+      appDesc.getInputStream(isd1);
+      appDesc.getInputStream(isd2);
+    }, getConfig());
 
     InputOperatorSpec inputOpSpec1 = streamAppDesc.getInputOperators().get(streamId1);
     InputOperatorSpec inputOpSpec2 = streamAppDesc.getInputOperators().get(streamId2);
@@ -222,10 +222,10 @@
     GenericInputDescriptor isd1 = sd.getInputDescriptor(streamId, mock(Serde.class));
     GenericInputDescriptor isd2 = sd.getInputDescriptor(streamId, mock(Serde.class));
     new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(isd1);
-        // should throw exception
-        appDesc.getInputStream(isd2);
-      }, getConfig());
+      appDesc.getInputStream(isd1);
+      // should throw exception
+      appDesc.getInputStream(isd2);
+    }, getConfig());
   }
 
   @Test
@@ -237,25 +237,25 @@
     GenericOutputDescriptor osd1 = sd2.getOutputDescriptor("test-stream-3", mock(Serde.class));
 
     new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(isd1);
-        try {
-          appDesc.getInputStream(isd2);
-          fail("Adding input stream with the same system name but different SystemDescriptor should have failed");
-        } catch (IllegalStateException e) { }
+      appDesc.getInputStream(isd1);
+      try {
+        appDesc.getInputStream(isd2);
+        fail("Adding input stream with the same system name but different SystemDescriptor should have failed");
+      } catch (IllegalStateException e) { }
 
-        try {
-          appDesc.getOutputStream(osd1);
-          fail("adding output stream with the same system name but different SystemDescriptor should have failed");
-        } catch (IllegalStateException e) { }
-      }, getConfig());
+      try {
+        appDesc.getOutputStream(osd1);
+        fail("adding output stream with the same system name but different SystemDescriptor should have failed");
+      } catch (IllegalStateException e) { }
+    }, getConfig());
 
     new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.withDefaultSystem(sd2);
-        try {
-          appDesc.getInputStream(isd1);
-          fail("Adding input stream with the same system name as the default system but different SystemDescriptor should have failed");
-        } catch (IllegalStateException e) { }
-      }, getConfig());
+      appDesc.withDefaultSystem(sd2);
+      try {
+        appDesc.getInputStream(isd1);
+        fail("Adding input stream with the same system name as the default system but different SystemDescriptor should have failed");
+      } catch (IllegalStateException e) { }
+    }, getConfig());
   }
 
   @Test
@@ -270,8 +270,8 @@
     GenericOutputDescriptor osd = sd.getOutputDescriptor(streamId, mockKVSerde);
 
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getOutputStream(osd);
-      }, getConfig());
+      appDesc.getOutputStream(osd);
+    }, getConfig());
 
     OutputStreamImpl<TestMessageEnvelope> outputStreamImpl = streamAppDesc.getOutputStreams().get(streamId);
     assertEquals(streamId, outputStreamImpl.getStreamId());
@@ -286,8 +286,8 @@
     GenericSystemDescriptor sd = new GenericSystemDescriptor("mockSystem", "mockSystemFactoryClass");
     GenericOutputDescriptor osd = sd.getOutputDescriptor(streamId, null);
     new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getOutputStream(osd);
-      }, getConfig());
+      appDesc.getOutputStream(osd);
+    }, getConfig());
   }
 
   @Test
@@ -298,8 +298,8 @@
     GenericOutputDescriptor osd = sd.getOutputDescriptor(streamId, mockValueSerde);
 
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getOutputStream(osd);
-      }, getConfig());
+      appDesc.getOutputStream(osd);
+    }, getConfig());
 
     OutputStreamImpl<TestMessageEnvelope> outputStreamImpl = streamAppDesc.getOutputStreams().get(streamId);
     assertEquals(streamId, outputStreamImpl.getStreamId());
@@ -315,9 +315,9 @@
     GenericInputDescriptor isd = sd.getInputDescriptor(streamId, mock(Serde.class));
 
     new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(isd);
-        appDesc.withDefaultSystem(sd); // should throw exception
-      }, getConfig());
+      appDesc.getInputStream(isd);
+      appDesc.withDefaultSystem(sd); // should throw exception
+    }, getConfig());
   }
 
   @Test(expected = IllegalStateException.class)
@@ -326,9 +326,9 @@
     GenericSystemDescriptor sd = new GenericSystemDescriptor("mockSystem", "mockSystemFactoryClass");
     GenericOutputDescriptor osd = sd.getOutputDescriptor(streamId, mock(Serde.class));
     new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getOutputStream(osd);
-        appDesc.withDefaultSystem(sd); // should throw exception
-      }, getConfig());
+      appDesc.getOutputStream(osd);
+      appDesc.withDefaultSystem(sd); // should throw exception
+    }, getConfig());
   }
 
   @Test(expected = IllegalStateException.class)
@@ -346,9 +346,9 @@
     GenericOutputDescriptor osd1 = sd.getOutputDescriptor(streamId, mock(Serde.class));
     GenericOutputDescriptor osd2 = sd.getOutputDescriptor(streamId, mock(Serde.class));
     new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getOutputStream(osd1);
-        appDesc.getOutputStream(osd2); // should throw exception
-      }, getConfig());
+      appDesc.getOutputStream(osd1);
+      appDesc.getOutputStream(osd2); // should throw exception
+    }, getConfig());
   }
 
   @Test
@@ -497,10 +497,10 @@
 
     GenericSystemDescriptor sd = new GenericSystemDescriptor("mockSystem", "mockSystemFactoryClass");
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        appDesc.getInputStream(sd.getInputDescriptor(testStreamId1, mock(Serde.class)));
-        appDesc.getInputStream(sd.getInputDescriptor(testStreamId2, mock(Serde.class)));
-        appDesc.getInputStream(sd.getInputDescriptor(testStreamId3, mock(Serde.class)));
-      }, mockConfig);
+      appDesc.getInputStream(sd.getInputDescriptor(testStreamId1, mock(Serde.class)));
+      appDesc.getInputStream(sd.getInputDescriptor(testStreamId2, mock(Serde.class)));
+      appDesc.getInputStream(sd.getInputDescriptor(testStreamId3, mock(Serde.class)));
+    }, mockConfig);
 
     List<InputOperatorSpec> inputSpecs = new ArrayList<>(streamAppDesc.getInputOperators().values());
     assertEquals(inputSpecs.size(), 3);
@@ -518,8 +518,8 @@
     when(mockTableDescriptor.getTableId()).thenReturn(tableId);
     AtomicReference<TableImpl> table = new AtomicReference<>();
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        table.set((TableImpl) appDesc.getTable(mockTableDescriptor));
-      }, mockConfig);
+      table.set((TableImpl) appDesc.getTable(mockTableDescriptor));
+    }, mockConfig);
     assertEquals(tableId, table.get().getTableId());
   }
 
@@ -567,10 +567,10 @@
   public void testGetTableWithBadId() {
     Config mockConfig = getConfig();
     new StreamApplicationDescriptorImpl(appDesc -> {
-        BaseTableDescriptor mockTableDescriptor = mock(BaseTableDescriptor.class);
-        when(mockTableDescriptor.getTableId()).thenReturn("my.table");
-        appDesc.getTable(mockTableDescriptor);
-      }, mockConfig);
+      BaseTableDescriptor mockTableDescriptor = mock(BaseTableDescriptor.class);
+      when(mockTableDescriptor.getTableId()).thenReturn("my.table");
+      appDesc.getTable(mockTableDescriptor);
+    }, mockConfig);
   }
 
   private Config getConfig() {
diff --git a/samza-core/src/test/java/org/apache/samza/clustermanager/MockClusterResourceManagerCallback.java b/samza-core/src/test/java/org/apache/samza/clustermanager/MockClusterResourceManagerCallback.java
index 4e6e2c9..f3ab1d2 100644
--- a/samza-core/src/test/java/org/apache/samza/clustermanager/MockClusterResourceManagerCallback.java
+++ b/samza-core/src/test/java/org/apache/samza/clustermanager/MockClusterResourceManagerCallback.java
@@ -48,6 +48,11 @@
   }
 
   @Override
+  public void onStreamProcessorStopFailure(SamzaResource resource, Throwable t) {
+    // no op
+  }
+
+  @Override
   public void onError(Throwable e) {
     error = e;
   }
diff --git a/samza-core/src/test/java/org/apache/samza/clustermanager/TestClusterBasedJobCoordinator.java b/samza-core/src/test/java/org/apache/samza/clustermanager/TestClusterBasedJobCoordinator.java
index 967bc81..6444451 100644
--- a/samza-core/src/test/java/org/apache/samza/clustermanager/TestClusterBasedJobCoordinator.java
+++ b/samza-core/src/test/java/org/apache/samza/clustermanager/TestClusterBasedJobCoordinator.java
@@ -54,7 +54,6 @@
 import org.powermock.modules.junit4.PowerMockRunner;
 
 import static org.junit.Assert.*;
-import static org.mockito.AdditionalMatchers.aryEq;
 import static org.mockito.Matchers.*;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.doReturn;
@@ -62,7 +61,6 @@
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 import static org.powermock.api.mockito.PowerMockito.mock;
-import static org.powermock.api.mockito.PowerMockito.verifyPrivate;
 
 
 /**
@@ -176,38 +174,6 @@
   }
 
   @Test
-  public void testRunWithClassLoader() throws Exception {
-    // partially mock ClusterBasedJobCoordinator (mock runClusterBasedJobCoordinator method only)
-    PowerMockito.spy(ClusterBasedJobCoordinator.class);
-    // save the context classloader to make sure that it gets set properly once the test is finished
-    ClassLoader previousContextClassLoader = Thread.currentThread().getContextClassLoader();
-    ClassLoader classLoader = mock(ClassLoader.class);
-    String[] args = new String[]{"arg0", "arg1"};
-    doReturn(ClusterBasedJobCoordinator.class).when(classLoader).loadClass(ClusterBasedJobCoordinator.class.getName());
-
-    // stub the private static method which is called by reflection
-    PowerMockito.doAnswer(invocation -> {
-        // make sure the only calls to this method has the expected arguments
-        assertArrayEquals(args, invocation.getArgumentAt(0, String[].class));
-        // checks that the context classloader is set correctly
-        assertEquals(classLoader, Thread.currentThread().getContextClassLoader());
-        return null;
-      }).when(ClusterBasedJobCoordinator.class, "runClusterBasedJobCoordinator", any());
-
-    try {
-      ClusterBasedJobCoordinator.runWithClassLoader(classLoader, args);
-      assertEquals(previousContextClassLoader, Thread.currentThread().getContextClassLoader());
-    } finally {
-      // reset it explicitly just in case runWithClassLoader throws an exception
-      Thread.currentThread().setContextClassLoader(previousContextClassLoader);
-    }
-    // make sure that the classloader got used
-    verify(classLoader).loadClass(ClusterBasedJobCoordinator.class.getName());
-    // make sure runClusterBasedJobCoordinator only got called once
-    verifyPrivate(ClusterBasedJobCoordinator.class).invoke("runClusterBasedJobCoordinator", new Object[]{aryEq(args)});
-  }
-
-  @Test
   public void testToArgs() {
     ApplicationConfig appConfig = new ApplicationConfig(new MapConfig(ImmutableMap.of(
         JobConfig.JOB_NAME, "test1",
diff --git a/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerAllocatorWithHostAffinity.java b/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerAllocatorWithHostAffinity.java
index 593ddb9..d5819eb 100644
--- a/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerAllocatorWithHostAffinity.java
+++ b/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerAllocatorWithHostAffinity.java
@@ -375,10 +375,10 @@
         new ContainerManager(containerPlacementMetadataStore, state, mockClusterResourceManager, true, false);
     // Mock the callback from ClusterManager to add resources to the allocator
     doAnswer((InvocationOnMock invocation) -> {
-        SamzaResource resource = (SamzaResource) invocation.getArgumentAt(0, List.class).get(0);
-        spyAllocator.addResource(resource);
-        return null;
-      }).when(mockCPM).onResourcesAvailable(anyList());
+      SamzaResource resource = (SamzaResource) invocation.getArgumentAt(0, List.class).get(0);
+      spyAllocator.addResource(resource);
+      return null;
+    }).when(mockCPM).onResourcesAvailable(anyList());
 
     spyAllocator = Mockito.spy(
         new ContainerAllocator(mockClusterResourceManager, config, state, true, containerManager));
diff --git a/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerAllocatorWithoutHostAffinity.java b/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerAllocatorWithoutHostAffinity.java
index 9d55218..f9104bd 100644
--- a/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerAllocatorWithoutHostAffinity.java
+++ b/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerAllocatorWithoutHostAffinity.java
@@ -281,10 +281,10 @@
         new ContainerAllocator(mockManager, config, state, false, spyContainerManager));
     // Mock the callback from ClusterManager to add resources to the allocator
     doAnswer((InvocationOnMock invocation) -> {
-        SamzaResource resource = (SamzaResource) invocation.getArgumentAt(0, List.class).get(0);
-        spyAllocator.addResource(resource);
-        return null;
-      }).when(mockCPM).onResourcesAvailable(anyList());
+      SamzaResource resource = (SamzaResource) invocation.getArgumentAt(0, List.class).get(0);
+      spyAllocator.addResource(resource);
+      return null;
+    }).when(mockCPM).onResourcesAvailable(anyList());
     // Request Resources
     spyAllocator.requestResources(containersToHostMapping);
     spyThread = new Thread(spyAllocator, "Container Allocator Thread");
diff --git a/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerPlacementActions.java b/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerPlacementActions.java
index 18e6cb4..0ec635d 100644
--- a/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerPlacementActions.java
+++ b/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerPlacementActions.java
@@ -51,6 +51,7 @@
 import org.apache.samza.testUtils.MockHttpServer;
 import org.eclipse.jetty.servlet.DefaultServlet;
 import org.eclipse.jetty.servlet.ServletHolder;
+import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -63,6 +64,10 @@
 
 /**
  * Set of Integration tests for container placement actions
+ *
+ * Please note that semaphores are used wherever possible; a few Thread.sleep calls remain where the main thread polls
+ * state changes on atomic variables or synchronized metadata objects, because it is difficult to plug semaphores into
+ * those pieces of logic
  */
 @RunWith(MockitoJUnitRunner.class)
 public class TestContainerPlacementActions {
@@ -120,9 +125,9 @@
   private JobModelManager getJobModelManagerWithHostAffinity(Map<String, String> containerIdToHost) {
     Map<String, Map<String, String>> localityMap = new HashMap<>();
     containerIdToHost.forEach((containerId, host) -> {
-        localityMap.put(containerId,
-            ImmutableMap.of(SetContainerHostMapping.HOST_KEY, containerIdToHost.get(containerId)));
-      });
+      localityMap.put(containerId,
+          ImmutableMap.of(SetContainerHostMapping.HOST_KEY, containerIdToHost.get(containerId)));
+    });
     LocalityManager mockLocalityManager = mock(LocalityManager.class);
     when(mockLocalityManager.readContainerLocality()).thenReturn(localityMap);
 
@@ -133,9 +138,9 @@
   private JobModelManager getJobModelManagerWithHostAffinityWithStandby(Map<String, String> containerIdToHost) {
     Map<String, Map<String, String>> localityMap = new HashMap<>();
     containerIdToHost.forEach((containerId, host) -> {
-        localityMap.put(containerId,
-            ImmutableMap.of(SetContainerHostMapping.HOST_KEY, containerIdToHost.get(containerId)));
-      });
+      localityMap.put(containerId,
+          ImmutableMap.of(SetContainerHostMapping.HOST_KEY, containerIdToHost.get(containerId)));
+    });
     LocalityManager mockLocalityManager = mock(LocalityManager.class);
     when(mockLocalityManager.readContainerLocality()).thenReturn(localityMap);
     // Generate JobModel for standby containers
@@ -164,6 +169,13 @@
             clusterResourceManager, Optional.of(allocatorWithHostAffinity), containerManager);
   }
 
+  @After
+  public void teardown() {
+    containerPlacementMetadataStore.stop();
+    cpm.stop();
+    coordinatorStreamStore.close();
+  }
+
   public void setupStandby() throws Exception {
     state = new SamzaApplicationState(getJobModelManagerWithHostAffinityWithStandby(ImmutableMap.of("0", "host-1", "1", "host-2", "0-0", "host-2", "1-0", "host-1")));
     callback = mock(ClusterResourceManager.Callback.class);
@@ -181,9 +193,9 @@
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onResourcesAvailable((List<SamzaResource>) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onResourcesAvailable((List<SamzaResource>) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onResourcesAvailable(anyList());
@@ -191,9 +203,9 @@
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onStreamProcessorLaunchSuccess((SamzaResource) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onStreamProcessorLaunchSuccess((SamzaResource) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onStreamProcessorLaunchSuccess(any());
@@ -201,9 +213,9 @@
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onResourcesCompleted((List<SamzaResourceStatus>) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onResourcesCompleted((List<SamzaResourceStatus>) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onResourcesCompleted(anyList());
@@ -266,17 +278,18 @@
   @Test(timeout = 30000)
   public void testActionQueuingForConsecutivePlacementActions() throws Exception {
     // Spawn a Request Allocator Thread
-    Thread requestAllocatorThread = new Thread(
-        new ContainerPlacementRequestAllocator(containerPlacementMetadataStore, cpm, new ApplicationConfig(config)),
-        "ContainerPlacement Request Allocator Thread");
+    ContainerPlacementRequestAllocator requestAllocator =
+        new ContainerPlacementRequestAllocator(containerPlacementMetadataStore, cpm, new ApplicationConfig(config), 100);
+    Thread requestAllocatorThread = new Thread(requestAllocator, "ContainerPlacement Request Allocator Thread");
+
     requestAllocatorThread.start();
 
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onResourcesAvailable((List<SamzaResource>) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onResourcesAvailable((List<SamzaResource>) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onResourcesAvailable(anyList());
@@ -284,9 +297,9 @@
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onStreamProcessorLaunchSuccess((SamzaResource) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onStreamProcessorLaunchSuccess((SamzaResource) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onStreamProcessorLaunchSuccess(any());
@@ -294,9 +307,9 @@
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onResourcesCompleted((List<SamzaResourceStatus>) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onResourcesCompleted((List<SamzaResourceStatus>) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onResourcesCompleted(anyList());
@@ -336,7 +349,7 @@
               == ContainerPlacementMessage.StatusCode.SUCCEEDED) {
         break;
       }
-      Thread.sleep(Duration.ofSeconds(5).toMillis());
+      Thread.sleep(100);
     }
 
     assertEquals(state.preferredHostRequests.get(), 4);
@@ -364,6 +377,9 @@
     // Requests from Previous deploy must be cleaned
     assertFalse(containerPlacementMetadataStore.readContainerPlacementRequestMessage(requestUUIDMoveBad).isPresent());
     assertFalse(containerPlacementMetadataStore.readContainerPlacementResponseMessage(requestUUIDMoveBad).isPresent());
+
+    // Cleanup Request Allocator Thread
+    cleanUpRequestAllocatorThread(requestAllocator, requestAllocatorThread);
   }
 
   @Test(timeout = 10000)
@@ -373,12 +389,12 @@
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            List<SamzaResource> resources = (List<SamzaResource>) args[0];
-            if (resources.get(0).getHost().equals("host-1") || resources.get(0).getHost().equals("host-2")) {
-              cpm.onResourcesAvailable((List<SamzaResource>) args[0]);
-            }
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          List<SamzaResource> resources = (List<SamzaResource>) args[0];
+          if (resources.get(0).getHost().equals("host-1") || resources.get(0).getHost().equals("host-2")) {
+            cpm.onResourcesAvailable((List<SamzaResource>) args[0]);
+          }
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onResourcesAvailable(anyList());
@@ -386,9 +402,9 @@
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onStreamProcessorLaunchSuccess((SamzaResource) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onStreamProcessorLaunchSuccess((SamzaResource) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onStreamProcessorLaunchSuccess(any());
@@ -451,9 +467,9 @@
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onResourcesAvailable((List<SamzaResource>) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onResourcesAvailable((List<SamzaResource>) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onResourcesAvailable(anyList());
@@ -462,14 +478,14 @@
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            SamzaResource host3Resource = (SamzaResource) args[0];
-            if (host3Resource.getHost().equals("host-3")) {
-              cpm.onStreamProcessorLaunchFailure(host3Resource, new Throwable("Custom Exception for Host-3"));
-            } else {
-              cpm.onStreamProcessorLaunchSuccess((SamzaResource) args[0]);
-            }
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          SamzaResource host3Resource = (SamzaResource) args[0];
+          if (host3Resource.getHost().equals("host-3")) {
+            cpm.onStreamProcessorLaunchFailure(host3Resource, new Throwable("Custom Exception for Host-3"));
+          } else {
+            cpm.onStreamProcessorLaunchSuccess((SamzaResource) args[0]);
+          }
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onStreamProcessorLaunchSuccess(any());
@@ -477,9 +493,9 @@
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onResourcesCompleted((List<SamzaResourceStatus>) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onResourcesCompleted((List<SamzaResourceStatus>) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onResourcesCompleted(anyList());
@@ -635,8 +651,9 @@
       fail("timed out waiting for the containers to start");
     }
 
-    // Wait for both the containers to be in running state
-    while (state.runningProcessors.size() != 2) {
+    // Wait for both the containers to be in running state & control action metadata to succeed
+    while (state.runningProcessors.size() != 2
+        && metadata.getActionStatus() != ContainerPlacementMessage.StatusCode.SUCCEEDED) {
       Thread.sleep(100);
     }
 
@@ -648,8 +665,6 @@
     assertEquals(state.anyHostRequests.get(), 0);
     // Failed processors must be empty
     assertEquals(state.failedProcessors.size(), 0);
-    // Control Action should be success in this case
-    assertEquals(metadata.getActionStatus(), ContainerPlacementMessage.StatusCode.SUCCEEDED);
   }
 
   @Test(timeout = 10000)
@@ -674,14 +689,14 @@
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            List<SamzaResource> resources = (List<SamzaResource>) args[0];
-            SamzaResource preferredResource = resources.get(0);
-            SamzaResource anyResource =
-                new SamzaResource(preferredResource.getNumCores(), preferredResource.getMemoryMb(),
-                    "host-" + RandomStringUtils.randomAlphanumeric(5), preferredResource.getContainerId());
-            cpm.onResourcesAvailable(ImmutableList.of(anyResource));
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          List<SamzaResource> resources = (List<SamzaResource>) args[0];
+          SamzaResource preferredResource = resources.get(0);
+          SamzaResource anyResource =
+              new SamzaResource(preferredResource.getNumCores(), preferredResource.getMemoryMb(),
+                  "host-" + RandomStringUtils.randomAlphanumeric(5), preferredResource.getContainerId());
+          cpm.onResourcesAvailable(ImmutableList.of(anyResource));
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onResourcesAvailable(anyList());
@@ -837,17 +852,18 @@
     setupStandby();
 
     // Spawn a Request Allocator Thread
-    Thread requestAllocatorThread = new Thread(
-        new ContainerPlacementRequestAllocator(containerPlacementMetadataStore, cpm, new ApplicationConfig(config)),
-        "ContainerPlacement Request Allocator Thread");
+    ContainerPlacementRequestAllocator requestAllocator =
+        new ContainerPlacementRequestAllocator(containerPlacementMetadataStore, cpm, new ApplicationConfig(config), 100);
+    Thread requestAllocatorThread = new Thread(requestAllocator, "ContainerPlacement Request Allocator Thread");
+
     requestAllocatorThread.start();
 
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onResourcesAvailable((List<SamzaResource>) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onResourcesAvailable((List<SamzaResource>) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onResourcesAvailable(anyList());
@@ -855,9 +871,9 @@
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onStreamProcessorLaunchSuccess((SamzaResource) args[0]);
-          }, "AMRMClientAsync").start();
+          Object[] args = invocation.getArguments();
+          cpm.onStreamProcessorLaunchSuccess((SamzaResource) args[0]);
+        }, "AMRMClientAsync").start();
         return null;
       }
     }).when(callback).onStreamProcessorLaunchSuccess(any());
@@ -865,10 +881,10 @@
     doAnswer(new Answer<Void>() {
       public Void answer(InvocationOnMock invocation) {
         new Thread(() -> {
-            Object[] args = invocation.getArguments();
-            cpm.onResourcesCompleted((List<SamzaResourceStatus>) args[0]);
-          }, "AMRMClientAsync").start();
-          return null;
+          Object[] args = invocation.getArguments();
+          cpm.onResourcesCompleted((List<SamzaResourceStatus>) args[0]);
+        }, "AMRMClientAsync").start();
+        return null;
       }
     }).when(callback).onResourcesCompleted(anyList());
 
@@ -911,7 +927,7 @@
               == ContainerPlacementMessage.StatusCode.BAD_REQUEST) {
         break;
       }
-      Thread.sleep(Duration.ofSeconds(5).toMillis());
+      Thread.sleep(100);
     }
 
     // App running state should remain the same
@@ -948,7 +964,7 @@
               == ContainerPlacementMessage.StatusCode.SUCCEEDED) {
         break;
       }
-      Thread.sleep(Duration.ofSeconds(5).toMillis());
+      Thread.sleep(100);
     }
 
     assertEquals(4, state.runningProcessors.size());
@@ -976,6 +992,9 @@
     // Request should be deleted as soon as it is accepted / being acted upon
     assertFalse(containerPlacementMetadataStore.readContainerPlacementRequestMessage(standbyMoveRequest).isPresent());
     assertFalse(containerPlacementMetadataStore.readContainerPlacementRequestMessage(activeMoveRequest).isPresent());
+
+    // Cleanup Request Allocator Thread
+    cleanUpRequestAllocatorThread(requestAllocator, requestAllocatorThread);
   }
 
   private void assertResponseMessage(ContainerPlacementResponseMessage responseMessage,
@@ -1011,4 +1030,13 @@
     // Request shall be deleted as soon as it is acted upon
     assertFalse(containerPlacementMetadataStore.readContainerPlacementRequestMessage(requestMessage.getUuid()).isPresent());
   }
+
+  private void cleanUpRequestAllocatorThread(ContainerPlacementRequestAllocator requestAllocator, Thread containerPlacementRequestAllocatorThread) {
+    requestAllocator.stop();
+    try {
+      containerPlacementRequestAllocatorThread.join();
+    } catch (InterruptedException ie) {
+      Thread.currentThread().interrupt();
+    }
+  }
 }
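The class-level comment added to TestContainerPlacementActions above describes the waiting strategy these tests rely on: where a semaphore cannot easily be plugged in, the main test thread polls shared atomic state with short Thread.sleep(100) intervals until the expected condition is observed or the test times out. Below is a minimal, self-contained illustration of that polling pattern; the class and constant names are invented for this sketch and are not part of the patch.

import java.util.concurrent.atomic.AtomicInteger;

// Illustrative sketch only (not part of the Samza patch): the main thread polls an
// atomic variable updated by an asynchronous callback thread, as the tests above do.
public class PollingWaitSketch {
  private static final int RUNNING_TARGET = 2;      // hypothetical expected state
  private static final long POLL_INTERVAL_MS = 100;
  private static final long TIMEOUT_MS = 10_000;

  public static void main(String[] args) throws InterruptedException {
    AtomicInteger runningProcessors = new AtomicInteger(0);

    // Stand-in for the mocked cluster-manager callback threads in the tests:
    // some other thread eventually flips the shared state.
    new Thread(() -> {
      try {
        Thread.sleep(300);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return;
      }
      runningProcessors.set(RUNNING_TARGET);
    }, "FakeCallbackThread").start();

    // Main (test) thread polls the atomic state with short sleeps instead of a
    // semaphore, failing if the expected state is not reached before the timeout.
    long deadline = System.currentTimeMillis() + TIMEOUT_MS;
    while (runningProcessors.get() != RUNNING_TARGET) {
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("timed out waiting for processors to start");
      }
      Thread.sleep(POLL_INTERVAL_MS);
    }
    System.out.println("running processors: " + runningProcessors.get());
  }
}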
diff --git a/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerProcessManager.java b/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerProcessManager.java
index 14771e7..a5dbe77 100644
--- a/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerProcessManager.java
+++ b/samza-core/src/test/java/org/apache/samza/clustermanager/TestContainerProcessManager.java
@@ -112,8 +112,8 @@
   private JobModelManager getJobModelManagerWithHostAffinity(Map<String, String> containerIdToHost) {
     Map<String, Map<String, String>> localityMap = new HashMap<>();
     containerIdToHost.forEach((containerId, host) -> {
-        localityMap.put(containerId, ImmutableMap.of(SetContainerHostMapping.HOST_KEY, containerIdToHost.get(containerId)));
-      });
+      localityMap.put(containerId, ImmutableMap.of(SetContainerHostMapping.HOST_KEY, containerIdToHost.get(containerId)));
+    });
     LocalityManager mockLocalityManager = mock(LocalityManager.class);
     when(mockLocalityManager.readContainerLocality()).thenReturn(localityMap);
 
diff --git a/samza-core/src/test/java/org/apache/samza/clustermanager/TestJobCoordinatorLaunchUtil.java b/samza-core/src/test/java/org/apache/samza/clustermanager/TestJobCoordinatorLaunchUtil.java
index 827e312..af27423 100644
--- a/samza-core/src/test/java/org/apache/samza/clustermanager/TestJobCoordinatorLaunchUtil.java
+++ b/samza-core/src/test/java/org/apache/samza/clustermanager/TestJobCoordinatorLaunchUtil.java
@@ -22,6 +22,7 @@
 import java.util.HashMap;
 import java.util.Map;
 import org.apache.samza.application.MockStreamApplication;
+import org.apache.samza.config.Config;
 import org.apache.samza.config.JobConfig;
 import org.apache.samza.config.MapConfig;
 import org.apache.samza.config.loaders.PropertiesConfigLoaderFactory;
@@ -37,12 +38,15 @@
 import org.powermock.modules.junit4.PowerMockRunner;
 
 import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 import static org.powermock.api.mockito.PowerMockito.mock;
 import static org.powermock.api.mockito.PowerMockito.verifyNew;
+import static org.powermock.api.mockito.PowerMockito.verifyStatic;
+
 
 @RunWith(PowerMockRunner.class)
 @PrepareForTest({
@@ -57,23 +61,31 @@
     config.put(JobConfig.CONFIG_LOADER_FACTORY, PropertiesConfigLoaderFactory.class.getName());
     config.put(PropertiesConfigLoaderFactory.CONFIG_LOADER_PROPERTIES_PREFIX + "path",
         getClass().getResource("/test.properties").getPath());
-    JobConfig originalConfig = new JobConfig(ConfigUtil.loadConfig(new MapConfig(config)));
-    JobConfig fullJobConfig = new JobConfig(new MapConfig(originalConfig, Collections.singletonMap("isAfterPlanning", "true")));
+    Config originalConfig = new JobConfig(ConfigUtil.loadConfig(new MapConfig(config)));
+    Config fullConfig = new MapConfig(originalConfig, Collections.singletonMap("isAfterPlanning", "true"));
+    Config autoSizingConfig = new MapConfig(Collections.singletonMap(JobConfig.JOB_AUTOSIZING_CONTAINER_COUNT, "10"));
+    Config finalConfig = new MapConfig(autoSizingConfig, fullConfig);
 
     RemoteJobPlanner mockJobPlanner = mock(RemoteJobPlanner.class);
     CoordinatorStreamStore mockCoordinatorStreamStore = mock(CoordinatorStreamStore.class);
     ClusterBasedJobCoordinator mockJC = mock(ClusterBasedJobCoordinator.class);
 
     PowerMockito.mockStatic(CoordinatorStreamUtil.class);
+    PowerMockito.doNothing().when(CoordinatorStreamUtil.class, "createCoordinatorStream", any());
     PowerMockito.doReturn(new MapConfig()).when(CoordinatorStreamUtil.class, "buildCoordinatorStreamConfig", any());
+    PowerMockito.doReturn(autoSizingConfig).when(CoordinatorStreamUtil.class, "readLaunchConfigFromCoordinatorStream", any(), any());
     PowerMockito.whenNew(CoordinatorStreamStore.class).withAnyArguments().thenReturn(mockCoordinatorStreamStore);
     PowerMockito.whenNew(RemoteJobPlanner.class).withAnyArguments().thenReturn(mockJobPlanner);
     PowerMockito.whenNew(ClusterBasedJobCoordinator.class).withAnyArguments().thenReturn(mockJC);
-    when(mockJobPlanner.prepareJobs()).thenReturn(Collections.singletonList(fullJobConfig));
+    when(mockJobPlanner.prepareJobs()).thenReturn(Collections.singletonList(new JobConfig(fullConfig)));
 
     JobCoordinatorLaunchUtil.run(new MockStreamApplication(), originalConfig);
 
-    verifyNew(ClusterBasedJobCoordinator.class).withArguments(any(MetricsRegistryMap.class), eq(mockCoordinatorStreamStore), eq(fullJobConfig));
+    verifyNew(ClusterBasedJobCoordinator.class).withArguments(any(MetricsRegistryMap.class), eq(mockCoordinatorStreamStore), eq(finalConfig));
     verify(mockJC, times(1)).run();
+    verifyStatic(times(1));
+    CoordinatorStreamUtil.createCoordinatorStream(any());
+    verifyStatic(times(1));
+    CoordinatorStreamUtil.writeConfigToCoordinatorStream(any(), anyBoolean());
   }
 }
diff --git a/samza-core/src/test/java/org/apache/samza/clustermanager/TestStandbyAllocator.java b/samza-core/src/test/java/org/apache/samza/clustermanager/TestStandbyAllocator.java
index 459f39d..c5f3ec1 100644
--- a/samza-core/src/test/java/org/apache/samza/clustermanager/TestStandbyAllocator.java
+++ b/samza-core/src/test/java/org/apache/samza/clustermanager/TestStandbyAllocator.java
@@ -74,9 +74,9 @@
           containerConstraints.contains(containerID));
 
       containerConstraints.forEach(containerConstraintID -> {
-          Assert.assertTrue("Constrained containers IDs should correspond to the active container",
-              containerID.split("-")[0].equals(containerConstraintID.split("-")[0]));
-        });
+        Assert.assertTrue("Constrained containers IDs should correspond to the active container",
+            containerID.split("-")[0].equals(containerConstraintID.split("-")[0]));
+      });
     }
   }
 
@@ -118,11 +118,11 @@
   private static Map<TaskName, TaskModel> getStandbyTasks(Map<TaskName, TaskModel> tasks, int replicaNum) {
     Map<TaskName, TaskModel> standbyTasks = new HashMap<>();
     tasks.forEach((taskName, taskModel) -> {
-        TaskName standbyTaskName = StandbyTaskUtil.getStandbyTaskName(taskName, replicaNum);
-        standbyTasks.put(standbyTaskName,
-            new TaskModel(standbyTaskName, taskModel.getSystemStreamPartitions(), taskModel.getChangelogPartition(),
-                TaskMode.Standby));
-      });
+      TaskName standbyTaskName = StandbyTaskUtil.getStandbyTaskName(taskName, replicaNum);
+      standbyTasks.put(standbyTaskName,
+          new TaskModel(standbyTaskName, taskModel.getSystemStreamPartitions(), taskModel.getChangelogPartition(),
+              TaskMode.Standby));
+    });
     return standbyTasks;
   }
 }
diff --git a/samza-core/src/test/java/org/apache/samza/config/TestJobConfig.java b/samza-core/src/test/java/org/apache/samza/config/TestJobConfig.java
index abe6dfa..00c2d5e 100644
--- a/samza-core/src/test/java/org/apache/samza/config/TestJobConfig.java
+++ b/samza-core/src/test/java/org/apache/samza/config/TestJobConfig.java
@@ -547,15 +547,13 @@
 
   @Test
   public void testGetClusterBasedJobCoordinatorDependencyIsolationEnabled() {
-    Config config =
-        new MapConfig(ImmutableMap.of(JobConfig.CLUSTER_BASED_JOB_COORDINATOR_DEPENDENCY_ISOLATION_ENABLED, "true"));
-    assertTrue(new JobConfig(config).getClusterBasedJobCoordinatorDependencyIsolationEnabled());
+    Config config = new MapConfig(ImmutableMap.of(JobConfig.JOB_SPLIT_DEPLOYMENT_ENABLED, "true"));
+    assertTrue(new JobConfig(config).isSplitDeploymentEnabled());
 
-    config =
-        new MapConfig(ImmutableMap.of(JobConfig.CLUSTER_BASED_JOB_COORDINATOR_DEPENDENCY_ISOLATION_ENABLED, "false"));
-    assertFalse(new JobConfig(config).getClusterBasedJobCoordinatorDependencyIsolationEnabled());
+    config = new MapConfig(ImmutableMap.of(JobConfig.JOB_SPLIT_DEPLOYMENT_ENABLED, "false"));
+    assertFalse(new JobConfig(config).isSplitDeploymentEnabled());
 
-    assertFalse(new JobConfig(new MapConfig()).getClusterBasedJobCoordinatorDependencyIsolationEnabled());
+    assertFalse(new JobConfig(new MapConfig()).isSplitDeploymentEnabled());
   }
 
   @Test
diff --git a/samza-core/src/test/java/org/apache/samza/config/TestStreamConfig.java b/samza-core/src/test/java/org/apache/samza/config/TestStreamConfig.java
index bbea19e..40ec38e 100644
--- a/samza-core/src/test/java/org/apache/samza/config/TestStreamConfig.java
+++ b/samza-core/src/test/java/org/apache/samza/config/TestStreamConfig.java
@@ -49,11 +49,11 @@
   public void testGetStreamMsgSerde() {
     String value = "my.msg.serde";
     doTestSamzaProperty(StreamConfig.MSG_SERDE, value,
-        (config, systemStream) -> assertEquals(Optional.of(value), config.getStreamMsgSerde(systemStream)));
+      (config, systemStream) -> assertEquals(Optional.of(value), config.getStreamMsgSerde(systemStream)));
     doTestSamzaProperty(StreamConfig.MSG_SERDE, "",
-        (config, systemStream) -> assertEquals(Optional.empty(), config.getStreamMsgSerde(systemStream)));
+      (config, systemStream) -> assertEquals(Optional.empty(), config.getStreamMsgSerde(systemStream)));
     doTestSamzaPropertyDoesNotExist(StreamConfig.MSG_SERDE,
-        (config, systemStream) -> assertEquals(Optional.empty(), config.getStreamMsgSerde(systemStream)));
+      (config, systemStream) -> assertEquals(Optional.empty(), config.getStreamMsgSerde(systemStream)));
     doTestSamzaPropertyInvalidConfig(StreamConfig::getStreamMsgSerde);
   }
 
@@ -61,39 +61,39 @@
   public void testGetStreamKeySerde() {
     String value = "my.key.serde";
     doTestSamzaProperty(StreamConfig.KEY_SERDE, value,
-        (config, systemStream) -> assertEquals(Optional.of(value), config.getStreamKeySerde(systemStream)));
+      (config, systemStream) -> assertEquals(Optional.of(value), config.getStreamKeySerde(systemStream)));
     doTestSamzaProperty(StreamConfig.KEY_SERDE, "",
-        (config, systemStream) -> assertEquals(Optional.empty(), config.getStreamKeySerde(systemStream)));
+      (config, systemStream) -> assertEquals(Optional.empty(), config.getStreamKeySerde(systemStream)));
     doTestSamzaPropertyDoesNotExist(StreamConfig.KEY_SERDE,
-        (config, systemStream) -> assertEquals(Optional.empty(), config.getStreamKeySerde(systemStream)));
+      (config, systemStream) -> assertEquals(Optional.empty(), config.getStreamKeySerde(systemStream)));
     doTestSamzaPropertyInvalidConfig(StreamConfig::getStreamKeySerde);
   }
 
   @Test
   public void testGetResetOffset() {
     doTestSamzaProperty(StreamConfig.CONSUMER_RESET_OFFSET, "true",
-        (config, systemStream) -> assertTrue(config.getResetOffset(systemStream)));
+      (config, systemStream) -> assertTrue(config.getResetOffset(systemStream)));
     doTestSamzaProperty(StreamConfig.CONSUMER_RESET_OFFSET, "false",
-        (config, systemStream) -> assertFalse(config.getResetOffset(systemStream)));
+      (config, systemStream) -> assertFalse(config.getResetOffset(systemStream)));
     // if not true/false, then use false
     doTestSamzaProperty(StreamConfig.CONSUMER_RESET_OFFSET, "unknown_value",
-        (config, systemStream) -> assertFalse(config.getResetOffset(systemStream)));
+      (config, systemStream) -> assertFalse(config.getResetOffset(systemStream)));
     doTestSamzaPropertyDoesNotExist(StreamConfig.CONSUMER_RESET_OFFSET,
-        (config, systemStream) -> assertFalse(config.getResetOffset(systemStream)));
+      (config, systemStream) -> assertFalse(config.getResetOffset(systemStream)));
     doTestSamzaPropertyInvalidConfig(StreamConfig::getResetOffset);
   }
 
   @Test
   public void testIsResetOffsetConfigured() {
     doTestSamzaProperty(StreamConfig.CONSUMER_RESET_OFFSET, "true",
-        (config, systemStream) -> assertTrue(config.isResetOffsetConfigured(systemStream)));
+      (config, systemStream) -> assertTrue(config.isResetOffsetConfigured(systemStream)));
     doTestSamzaProperty(StreamConfig.CONSUMER_RESET_OFFSET, "false",
-        (config, systemStream) -> assertTrue(config.isResetOffsetConfigured(systemStream)));
+      (config, systemStream) -> assertTrue(config.isResetOffsetConfigured(systemStream)));
     // if not true/false, then use false
     doTestSamzaProperty(StreamConfig.CONSUMER_RESET_OFFSET, "unknown_value",
-        (config, systemStream) -> assertTrue(config.isResetOffsetConfigured(systemStream)));
+      (config, systemStream) -> assertTrue(config.isResetOffsetConfigured(systemStream)));
     doTestSamzaPropertyDoesNotExist(StreamConfig.CONSUMER_RESET_OFFSET,
-        (config, systemStream) -> assertFalse(config.isResetOffsetConfigured(systemStream)));
+      (config, systemStream) -> assertFalse(config.isResetOffsetConfigured(systemStream)));
     doTestSamzaPropertyInvalidConfig(StreamConfig::isResetOffsetConfigured);
   }
 
@@ -101,12 +101,12 @@
   public void testGetDefaultStreamOffset() {
     String value = "my_offset_default";
     doTestSamzaProperty(StreamConfig.CONSUMER_OFFSET_DEFAULT, value,
-        (config, systemStream) -> assertEquals(Optional.of(value), config.getDefaultStreamOffset(systemStream)));
+      (config, systemStream) -> assertEquals(Optional.of(value), config.getDefaultStreamOffset(systemStream)));
     doTestSamzaProperty(StreamConfig.CONSUMER_OFFSET_DEFAULT, "",
-        (config, systemStream) -> assertEquals(Optional.of(""), config.getDefaultStreamOffset(systemStream)));
+      (config, systemStream) -> assertEquals(Optional.of(""), config.getDefaultStreamOffset(systemStream)));
     doTestSamzaPropertyDoesNotExist(StreamConfig.CONSUMER_OFFSET_DEFAULT,
-        (config, systemStream) -> assertEquals(Optional.empty(),
-            new StreamConfig(config).getDefaultStreamOffset(systemStream)));
+      (config, systemStream) -> assertEquals(Optional.empty(),
+          new StreamConfig(config).getDefaultStreamOffset(systemStream)));
     doTestSamzaPropertyInvalidConfig(StreamConfig::getDefaultStreamOffset);
   }
 
@@ -114,52 +114,52 @@
   public void testIsDefaultStreamOffsetConfigured() {
     String value = "my_offset_default";
     doTestSamzaProperty(StreamConfig.CONSUMER_OFFSET_DEFAULT, value,
-        (config, systemStream) -> assertTrue(config.isDefaultStreamOffsetConfigured(systemStream)));
+      (config, systemStream) -> assertTrue(config.isDefaultStreamOffsetConfigured(systemStream)));
     doTestSamzaProperty(StreamConfig.CONSUMER_OFFSET_DEFAULT, "",
-        (config, systemStream) -> assertTrue(config.isDefaultStreamOffsetConfigured(systemStream)));
+      (config, systemStream) -> assertTrue(config.isDefaultStreamOffsetConfigured(systemStream)));
     doTestSamzaPropertyDoesNotExist(StreamConfig.CONSUMER_OFFSET_DEFAULT,
-        (config, systemStream) -> assertFalse(config.isDefaultStreamOffsetConfigured(systemStream)));
+      (config, systemStream) -> assertFalse(config.isDefaultStreamOffsetConfigured(systemStream)));
     doTestSamzaPropertyInvalidConfig(StreamConfig::isDefaultStreamOffsetConfigured);
   }
 
   @Test
   public void testGetBootstrapEnabled() {
     doTestSamzaProperty(StreamConfig.BOOTSTRAP, "true",
-        (config, systemStream) -> assertTrue(config.getBootstrapEnabled(systemStream)));
+      (config, systemStream) -> assertTrue(config.getBootstrapEnabled(systemStream)));
     doTestSamzaProperty(StreamConfig.BOOTSTRAP, "false",
-        (config, systemStream) -> assertFalse(config.getBootstrapEnabled(systemStream)));
+      (config, systemStream) -> assertFalse(config.getBootstrapEnabled(systemStream)));
     // if not true/false, then use false
     doTestSamzaProperty(StreamConfig.BOOTSTRAP, "unknown_value",
-        (config, systemStream) -> assertFalse(config.getBootstrapEnabled(systemStream)));
+      (config, systemStream) -> assertFalse(config.getBootstrapEnabled(systemStream)));
     doTestSamzaPropertyDoesNotExist(StreamConfig.BOOTSTRAP,
-        (config, systemStream) -> assertFalse(config.getBootstrapEnabled(systemStream)));
+      (config, systemStream) -> assertFalse(config.getBootstrapEnabled(systemStream)));
     doTestSamzaPropertyInvalidConfig(StreamConfig::getBootstrapEnabled);
   }
 
   @Test
   public void testGetBroadcastEnabled() {
     doTestSamzaProperty(StreamConfig.BROADCAST, "true",
-        (config, systemStream) -> assertTrue(config.getBroadcastEnabled(systemStream)));
+      (config, systemStream) -> assertTrue(config.getBroadcastEnabled(systemStream)));
     doTestSamzaProperty(StreamConfig.BROADCAST, "false",
-        (config, systemStream) -> assertFalse(config.getBroadcastEnabled(systemStream)));
+      (config, systemStream) -> assertFalse(config.getBroadcastEnabled(systemStream)));
     // if not true/false, then use false
     doTestSamzaProperty(StreamConfig.BROADCAST, "unknown_value",
-        (config, systemStream) -> assertFalse(config.getBroadcastEnabled(systemStream)));
+      (config, systemStream) -> assertFalse(config.getBroadcastEnabled(systemStream)));
     doTestSamzaPropertyDoesNotExist(StreamConfig.BROADCAST,
-        (config, systemStream) -> assertFalse(config.getBroadcastEnabled(systemStream)));
+      (config, systemStream) -> assertFalse(config.getBroadcastEnabled(systemStream)));
     doTestSamzaPropertyInvalidConfig(StreamConfig::getBroadcastEnabled);
   }
 
   @Test
   public void testGetPriority() {
     doTestSamzaProperty(StreamConfig.PRIORITY, "0",
-        (config, systemStream) -> assertEquals(0, config.getPriority(systemStream)));
+      (config, systemStream) -> assertEquals(0, config.getPriority(systemStream)));
     doTestSamzaProperty(StreamConfig.PRIORITY, "100",
-        (config, systemStream) -> assertEquals(100, config.getPriority(systemStream)));
+      (config, systemStream) -> assertEquals(100, config.getPriority(systemStream)));
     doTestSamzaProperty(StreamConfig.PRIORITY, "-1",
-        (config, systemStream) -> assertEquals(-1, config.getPriority(systemStream)));
+      (config, systemStream) -> assertEquals(-1, config.getPriority(systemStream)));
     doTestSamzaPropertyDoesNotExist(StreamConfig.PRIORITY,
-        (config, systemStream) -> assertEquals(-1, config.getPriority(systemStream)));
+      (config, systemStream) -> assertEquals(-1, config.getPriority(systemStream)));
     doTestSamzaPropertyInvalidConfig(StreamConfig::getPriority);
   }
 
diff --git a/samza-core/src/test/java/org/apache/samza/container/TestContainerHeartbeatMonitor.java b/samza-core/src/test/java/org/apache/samza/container/TestContainerHeartbeatMonitor.java
index 65701b3..2445c00 100644
--- a/samza-core/src/test/java/org/apache/samza/container/TestContainerHeartbeatMonitor.java
+++ b/samza-core/src/test/java/org/apache/samza/container/TestContainerHeartbeatMonitor.java
@@ -103,16 +103,16 @@
     ScheduledExecutorService scheduler = mock(ScheduledExecutorService.class);
     when(scheduler.scheduleAtFixedRate(any(), eq(0L), eq((long) ContainerHeartbeatMonitor.SCHEDULE_MS),
         eq(TimeUnit.MILLISECONDS))).thenAnswer(invocation -> {
-            Runnable command = invocation.getArgumentAt(0, Runnable.class);
-            (new Thread(() -> {
-                // just need to invoke the command once for these tests
-                command.run();
-                // notify that the execution is done, so verifications can begin
-                schedulerFixedRateExecutionLatch.countDown();
-              })).start();
-            // return value is not used by ContainerHeartbeatMonitor
-            return null;
-          });
+          Runnable command = invocation.getArgumentAt(0, Runnable.class);
+          (new Thread(() -> {
+            // just need to invoke the command once for these tests
+            command.run();
+            // notify that the execution is done, so verifications can begin
+            schedulerFixedRateExecutionLatch.countDown();
+          })).start();
+          // return value is not used by ContainerHeartbeatMonitor
+          return null;
+        });
     return scheduler;
   }
 }
diff --git a/samza-core/src/test/java/org/apache/samza/container/TestRunLoop.java b/samza-core/src/test/java/org/apache/samza/container/TestRunLoop.java
index 4556679..da855a1 100644
--- a/samza-core/src/test/java/org/apache/samza/container/TestRunLoop.java
+++ b/samza-core/src/test/java/org/apache/samza/container/TestRunLoop.java
@@ -19,54 +19,33 @@
 
 package org.apache.samza.container;
 
-import java.util.ArrayList;
+import com.google.common.collect.ImmutableMap;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.samza.Partition;
-import org.apache.samza.checkpoint.Checkpoint;
+import org.apache.samza.SamzaException;
 import org.apache.samza.checkpoint.OffsetManager;
-import org.apache.samza.context.ContainerContext;
-import org.apache.samza.context.JobContext;
-import org.apache.samza.job.model.TaskModel;
 import org.apache.samza.metrics.MetricsRegistryMap;
 import org.apache.samza.system.IncomingMessageEnvelope;
-import org.apache.samza.system.SystemAdmin;
-import org.apache.samza.system.SystemAdmins;
-import org.apache.samza.system.SystemConsumer;
 import org.apache.samza.system.SystemConsumers;
 import org.apache.samza.system.SystemStreamPartition;
-import org.apache.samza.system.TestSystemConsumers;
-import org.apache.samza.task.AsyncStreamTask;
-import org.apache.samza.task.EndOfStreamListenerTask;
-import org.apache.samza.task.MessageCollector;
+import org.apache.samza.task.ReadableCoordinator;
 import org.apache.samza.task.TaskCallback;
-import org.apache.samza.task.TaskCallbackImpl;
+import org.apache.samza.task.TaskCallbackFactory;
 import org.apache.samza.task.TaskCoordinator;
-import org.apache.samza.task.TaskInstanceCollector;
-import org.apache.samza.task.WindowableTask;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
-import org.mockito.Mockito;
-import scala.Option;
-import scala.collection.JavaConverters;
+import org.mockito.InOrder;
 
 import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.any;
-import static org.mockito.Mockito.anyLong;
-import static org.mockito.Mockito.anyObject;
-import static org.mockito.Mockito.atLeastOnce;
-import static org.mockito.Mockito.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.*;
+
 
 public class TestRunLoop {
   // Immutable objects shared by all test methods.
@@ -83,701 +62,468 @@
   private final TaskName taskName1 = new TaskName(p1.toString());
   private final SystemStreamPartition ssp0 = new SystemStreamPartition("testSystem", "testStream", p0);
   private final SystemStreamPartition ssp1 = new SystemStreamPartition("testSystem", "testStream", p1);
-  private final IncomingMessageEnvelope envelope0 = new IncomingMessageEnvelope(ssp0, "0", "key0", "value0");
-  private final IncomingMessageEnvelope envelope1 = new IncomingMessageEnvelope(ssp1, "1", "key1", "value1");
-  private final IncomingMessageEnvelope envelope3 = new IncomingMessageEnvelope(ssp0, "1", "key0", "value0");
+  private final IncomingMessageEnvelope envelope00 = new IncomingMessageEnvelope(ssp0, "0", "key0", "value0");
+  private final IncomingMessageEnvelope envelope11 = new IncomingMessageEnvelope(ssp1, "1", "key1", "value1");
+  private final IncomingMessageEnvelope envelope01 = new IncomingMessageEnvelope(ssp0, "1", "key0", "value0");
   private final IncomingMessageEnvelope ssp0EndOfStream = IncomingMessageEnvelope.buildEndOfStreamEnvelope(ssp0);
   private final IncomingMessageEnvelope ssp1EndOfStream = IncomingMessageEnvelope.buildEndOfStreamEnvelope(ssp1);
 
-  TaskInstance createTaskInstance(AsyncStreamTask task, TaskName taskName, SystemStreamPartition ssp, OffsetManager manager, SystemConsumers consumers) {
-    TaskModel taskModel = mock(TaskModel.class);
-    when(taskModel.getTaskName()).thenReturn(taskName);
-    TaskInstanceMetrics taskInstanceMetrics = new TaskInstanceMetrics("task", new MetricsRegistryMap());
-    scala.collection.immutable.Set<SystemStreamPartition> sspSet = JavaConverters.asScalaSetConverter(Collections.singleton(ssp)).asScala().toSet();
-    return new TaskInstance(task,
-        taskModel,
-        taskInstanceMetrics,
-        null,
-        consumers,
-        mock(TaskInstanceCollector.class),
-        manager,
-        null,
-        null,
-        sspSet,
-        new TaskInstanceExceptionHandler(taskInstanceMetrics, new scala.collection.immutable.HashSet<String>()),
-        null,
-        null,
-        null,
-        null,
-        mock(JobContext.class),
-        mock(ContainerContext.class),
-        Option.apply(null),
-        Option.apply(null),
-        Option.apply(null));
-  }
-
-  interface TestCode {
-    void run(TaskCallback callback);
-  }
-
-  class TestTask implements AsyncStreamTask, WindowableTask, EndOfStreamListenerTask {
-    private final boolean shutdown;
-    private final boolean commit;
-    private final boolean success;
-    private final ExecutorService callbackExecutor = Executors.newFixedThreadPool(4);
-
-    private AtomicInteger completed = new AtomicInteger(0);
-    private TestCode callbackHandler = null;
-    private TestCode commitHandler = null;
-    private TaskCoordinator.RequestScope commitRequest = null;
-    private TaskCoordinator.RequestScope shutdownRequest = TaskCoordinator.RequestScope.ALL_TASKS_IN_CONTAINER;
-
-    private CountDownLatch processedMessagesLatch = null;
-
-    private volatile int windowCount = 0;
-    private volatile int processed = 0;
-    private volatile int committed = 0;
-
-    private int maxMessagesInFlight;
-
-    TestTask(boolean success, boolean commit, boolean shutdown, CountDownLatch processedMessagesLatch) {
-      this.success = success;
-      this.shutdown = shutdown;
-      this.commit = commit;
-      this.processedMessagesLatch = processedMessagesLatch;
-    }
-
-    TestTask(boolean success, boolean commit, boolean shutdown,
-             CountDownLatch processedMessagesLatch, int maxMessagesInFlight) {
-      this(success, commit, shutdown, processedMessagesLatch);
-      this.maxMessagesInFlight = maxMessagesInFlight;
-    }
-
-    @Override
-    public void processAsync(IncomingMessageEnvelope envelope, MessageCollector collector, TaskCoordinator coordinator, TaskCallback callback) {
-
-      if (maxMessagesInFlight == 1) {
-        assertEquals(processed, completed.get());
-      }
-
-      processed++;
-
-      if (commit) {
-        if (commitHandler != null) {
-          callbackExecutor.submit(() -> commitHandler.run(callback));
-        }
-        if (commitRequest != null) {
-          coordinator.commit(commitRequest);
-        }
-        committed++;
-      }
-
-      if (shutdown) {
-        coordinator.shutdown(shutdownRequest);
-      }
-
-      callbackExecutor.submit(() -> {
-          if (callbackHandler != null) {
-            callbackHandler.run(callback);
-          }
-
-          completed.incrementAndGet();
-
-          if (success) {
-            callback.complete();
-          } else {
-            callback.failure(new Exception("process failure"));
-          }
-
-          if (processedMessagesLatch != null) {
-            processedMessagesLatch.countDown();
-          }
-        });
-    }
-
-    @Override
-    public void window(MessageCollector collector, TaskCoordinator coordinator) throws Exception {
-      windowCount++;
-
-      if (shutdown && windowCount == 4) {
-        coordinator.shutdown(shutdownRequest);
-      }
-    }
-
-    @Override
-    public void onEndOfStream(MessageCollector collector, TaskCoordinator coordinator) {
-      coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK);
-    }
-
-    void setShutdownRequest(TaskCoordinator.RequestScope shutdownRequest) {
-      this.shutdownRequest = shutdownRequest;
-    }
-
-    void setCommitRequest(TaskCoordinator.RequestScope commitRequest) {
-      this.commitRequest = commitRequest;
-    }
-  }
-
   @Rule
   public Timeout maxTestDurationInSeconds = Timeout.seconds(120);
 
   @Test
-  public void testProcessMultipleTasks() throws Exception {
-    CountDownLatch task0ProcessedMessages = new CountDownLatch(1);
-    CountDownLatch task1ProcessedMessages = new CountDownLatch(1);
+  public void testProcessMultipleTasks() {
     SystemConsumers consumerMultiplexer = mock(SystemConsumers.class);
-    when(consumerMultiplexer.pollIntervalMs()).thenReturn(10);
-    OffsetManager offsetManager = mock(OffsetManager.class);
 
-    TestTask task0 = new TestTask(true, true, false, task0ProcessedMessages);
-    TestTask task1 = new TestTask(true, false, true, task1ProcessedMessages);
-    TaskInstance t0 = createTaskInstance(task0, taskName0, ssp0, offsetManager, consumerMultiplexer);
-    TaskInstance t1 = createTaskInstance(task1, taskName1, ssp1, offsetManager, consumerMultiplexer);
+    RunLoopTask task0 = getMockRunLoopTask(taskName0, ssp0);
+    RunLoopTask task1 = getMockRunLoopTask(taskName1, ssp1);
 
-    Map<TaskName, TaskInstance> tasks = new HashMap<>();
-    tasks.put(taskName0, t0);
-    tasks.put(taskName1, t1);
+    Map<TaskName, RunLoopTask> tasks = new HashMap<>();
+    tasks.put(taskName0, task0);
+    tasks.put(taskName1, task1);
 
     int maxMessagesInFlight = 1;
     RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
-                                            callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics,
-                                            () -> 0L, false);
-    when(consumerMultiplexer.choose(false)).thenReturn(envelope0).thenReturn(envelope1).thenReturn(null);
+        callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
+    when(consumerMultiplexer.choose(false)).thenReturn(envelope00).thenReturn(envelope11).thenReturn(ssp0EndOfStream).thenReturn(ssp1EndOfStream).thenReturn(null);
     runLoop.run();
 
-    task0ProcessedMessages.await();
-    task1ProcessedMessages.await();
+    verify(task0).process(eq(envelope00), any(), any());
+    verify(task1).process(eq(envelope11), any(), any());
 
-    assertEquals(1, task0.processed);
-    assertEquals(1, task0.completed.get());
-    assertEquals(1, task1.processed);
-    assertEquals(1, task1.completed.get());
-    assertEquals(2L, containerMetrics.envelopes().getCount());
+    assertEquals(4L, containerMetrics.envelopes().getCount());
+  }
+
+  @Test
+  public void testProcessInOrder() {
+    SystemConsumers consumerMultiplexer = mock(SystemConsumers.class);
+    when(consumerMultiplexer.choose(false)).thenReturn(envelope00).thenReturn(envelope01).thenReturn(ssp0EndOfStream).thenReturn(null);
+
+    RunLoopTask task0 = getMockRunLoopTask(taskName0, ssp0);
+
+    Map<TaskName, RunLoopTask> tasks = ImmutableMap.of(taskName0, task0);
+    int maxMessagesInFlight = 1;
+    RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
+                                            callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
+    runLoop.run();
+
+    InOrder inOrder = inOrder(task0);
+    inOrder.verify(task0).process(eq(envelope00), any(), any());
+    inOrder.verify(task0).process(eq(envelope01), any(), any());
+  }
+
+  @Test
+  public void testProcessCallbacksCompletedOutOfOrder() {
+    int maxMessagesInFlight = 2;
+    ExecutorService taskExecutor = Executors.newFixedThreadPool(1);
+    SystemConsumers consumerMultiplexer = mock(SystemConsumers.class);
+    OffsetManager offsetManager = mock(OffsetManager.class);
+
+    RunLoopTask task0 = getMockRunLoopTask(taskName0, ssp0);
+    when(task0.offsetManager()).thenReturn(offsetManager);
+    CountDownLatch firstMessageBarrier = new CountDownLatch(1);
+    doAnswer(invocation -> {
+      ReadableCoordinator coordinator = invocation.getArgumentAt(1, ReadableCoordinator.class);
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+      TaskCallback callback = callbackFactory.createCallback();
+      taskExecutor.submit(() -> {
+        firstMessageBarrier.await();
+        coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK);
+        coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
+        callback.complete();
+        return null;
+      });
+      return null;
+    }).when(task0).process(eq(envelope00), any(), any());
+
+    doAnswer(invocation -> {
+      assertEquals(1, task0.metrics().messagesInFlight().getValue());
+      assertEquals(0, task0.metrics().asyncCallbackCompleted().getCount());
+
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+      TaskCallback callback = callbackFactory.createCallback();
+      callback.complete();
+      firstMessageBarrier.countDown();
+      return null;
+    }).when(task0).process(eq(envelope01), any(), any());
+
+    Map<TaskName, RunLoopTask> tasks = new HashMap<>();
+    tasks.put(taskName0, task0);
+
+    RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
+                                            callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
+    when(consumerMultiplexer.choose(false)).thenReturn(envelope00).thenReturn(envelope01).thenReturn(null);
+    runLoop.run();
+
+    InOrder inOrder = inOrder(task0);
+    inOrder.verify(task0).process(eq(envelope00), any(), any());
+    inOrder.verify(task0).process(eq(envelope01), any(), any());
+
+    verify(offsetManager).update(eq(taskName0), eq(ssp0), eq(envelope00.getOffset()));
+
     assertEquals(2L, containerMetrics.processes().getCount());
   }
 
   @Test
-  public void testProcessInOrder() throws Exception {
-    CountDownLatch task0ProcessedMessages = new CountDownLatch(2);
-    CountDownLatch task1ProcessedMessages = new CountDownLatch(1);
+  public void testWindow() {
     SystemConsumers consumerMultiplexer = mock(SystemConsumers.class);
-    when(consumerMultiplexer.pollIntervalMs()).thenReturn(10);
-    OffsetManager offsetManager = mock(OffsetManager.class);
-
-    TestTask task0 = new TestTask(true, true, false, task0ProcessedMessages);
-    TestTask task1 = new TestTask(true, false, false, task1ProcessedMessages);
-    TaskInstance t0 = createTaskInstance(task0, taskName0, ssp0, offsetManager, consumerMultiplexer);
-    TaskInstance t1 = createTaskInstance(task1, taskName1, ssp1, offsetManager, consumerMultiplexer);
-
-    Map<TaskName, TaskInstance> tasks = new HashMap<>();
-    tasks.put(taskName0, t0);
-    tasks.put(taskName1, t1);
 
     int maxMessagesInFlight = 1;
-    RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
-                                            callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
-    when(consumerMultiplexer.choose(false)).thenReturn(envelope0).thenReturn(envelope3).thenReturn(envelope1).thenReturn(ssp0EndOfStream).thenReturn(ssp1EndOfStream).thenReturn(null);
-    runLoop.run();
-
-    // Wait till the tasks completes processing all the messages.
-    task0ProcessedMessages.await();
-    task1ProcessedMessages.await();
-
-    assertEquals(2, task0.processed);
-    assertEquals(2, task0.completed.get());
-    assertEquals(1, task1.processed);
-    assertEquals(1, task1.completed.get());
-    assertEquals(5L, containerMetrics.envelopes().getCount());
-    assertEquals(3L, containerMetrics.processes().getCount());
-    assertEquals(2L, t0.metrics().asyncCallbackCompleted().getCount());
-    assertEquals(1L, t1.metrics().asyncCallbackCompleted().getCount());
-  }
-
-  private TestCode buildOutofOrderCallback(final TestTask task) {
-    final CountDownLatch latch = new CountDownLatch(1);
-    return new TestCode() {
-      @Override
-      public void run(TaskCallback callback) {
-        IncomingMessageEnvelope envelope = ((TaskCallbackImpl) callback).getEnvelope();
-        if (envelope.equals(envelope0)) {
-          // process first message will wait till the second one is processed
-          try {
-            latch.await();
-          } catch (InterruptedException e) {
-            e.printStackTrace();
-          }
-        } else {
-          // second envelope complete first
-          assertEquals(0, task.completed.get());
-          latch.countDown();
-        }
-      }
-    };
-  }
-
-  @Test
-  public void testProcessOutOfOrder() throws Exception {
-    int maxMessagesInFlight = 2;
-
-    CountDownLatch task0ProcessedMessagesLatch = new CountDownLatch(2);
-    CountDownLatch task1ProcessedMessagesLatch = new CountDownLatch(1);
-
-    SystemConsumers consumerMultiplexer = mock(SystemConsumers.class);
-    when(consumerMultiplexer.pollIntervalMs()).thenReturn(10);
-    OffsetManager offsetManager = mock(OffsetManager.class);
-
-    TestTask task0 = new TestTask(true, true, false, task0ProcessedMessagesLatch, maxMessagesInFlight);
-    TestTask task1 = new TestTask(true, false, false, task1ProcessedMessagesLatch, maxMessagesInFlight);
-    TaskInstance t0 = createTaskInstance(task0, taskName0, ssp0, offsetManager, consumerMultiplexer);
-    TaskInstance t1 = createTaskInstance(task1, taskName1, ssp1, offsetManager, consumerMultiplexer);
-
-    Map<TaskName, TaskInstance> tasks = new HashMap<>();
-    tasks.put(taskName0, t0);
-    tasks.put(taskName1, t1);
-
-    task0.callbackHandler = buildOutofOrderCallback(task0);
-    RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
-                                            callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
-    when(consumerMultiplexer.choose(false)).thenReturn(envelope0).thenReturn(envelope3).thenReturn(envelope1).thenReturn(ssp0EndOfStream).thenReturn(ssp1EndOfStream).thenReturn(null);
-    runLoop.run();
-
-    task0ProcessedMessagesLatch.await();
-    task1ProcessedMessagesLatch.await();
-
-    assertEquals(2, task0.processed);
-    assertEquals(2, task0.completed.get());
-    assertEquals(1, task1.processed);
-    assertEquals(1, task1.completed.get());
-    assertEquals(5L, containerMetrics.envelopes().getCount());
-    assertEquals(3L, containerMetrics.processes().getCount());
-  }
-
-  @Test
-  public void testWindow() throws Exception {
-    TestTask task0 = new TestTask(true, true, false, null);
-    TestTask task1 = new TestTask(true, false, true, null);
-
-    SystemConsumers consumerMultiplexer = mock(SystemConsumers.class);
-    when(consumerMultiplexer.pollIntervalMs()).thenReturn(10);
-    OffsetManager offsetManager = mock(OffsetManager.class);
-
-    TaskInstance t0 = createTaskInstance(task0, taskName0, ssp0, offsetManager, consumerMultiplexer);
-    TaskInstance t1 = createTaskInstance(task1, taskName1, ssp1, offsetManager, consumerMultiplexer);
-
-    Map<TaskName, TaskInstance> tasks = new HashMap<>();
-    tasks.put(taskName0, t0);
-    tasks.put(taskName1, t1);
-
     long windowMs = 1;
-    int maxMessagesInFlight = 1;
+    RunLoopTask task = getMockRunLoopTask(taskName0, ssp0);
+    when(task.isWindowableTask()).thenReturn(true);
+
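+    // the task requests shutdown from its fourth window() invocation, so exactly four calls are expected.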
+    final AtomicInteger windowCount = new AtomicInteger(0);
+    doAnswer(x -> {
+      windowCount.incrementAndGet();
+      if (windowCount.get() == 4) {
+        x.getArgumentAt(0, ReadableCoordinator.class).shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
+      }
+      return null;
+    }).when(task).window(any());
+
+    Map<TaskName, RunLoopTask> tasks = new HashMap<>();
+    tasks.put(taskName0, task);
+
     RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
-                                            callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics,
-                                            () -> 0L, false);
+        callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
     when(consumerMultiplexer.choose(false)).thenReturn(null);
     runLoop.run();
 
-    assertEquals(4, task1.windowCount);
+    verify(task, times(4)).window(any());
   }
 
   @Test
-  public void testCommitSingleTask() throws Exception {
-    CountDownLatch task0ProcessedMessagesLatch = new CountDownLatch(1);
-    CountDownLatch task1ProcessedMessagesLatch = new CountDownLatch(1);
-
+  public void testCommitSingleTask() {
     SystemConsumers consumerMultiplexer = mock(SystemConsumers.class);
-    when(consumerMultiplexer.pollIntervalMs()).thenReturn(10);
-    OffsetManager offsetManager = mock(OffsetManager.class);
 
-    TestTask task0 = new TestTask(true, true, false, task0ProcessedMessagesLatch);
-    task0.setCommitRequest(TaskCoordinator.RequestScope.CURRENT_TASK);
-    TestTask task1 = new TestTask(true, false, true, task1ProcessedMessagesLatch);
-    TaskInstance t0 = createTaskInstance(task0, taskName0, ssp0, offsetManager, consumerMultiplexer);
-    TaskInstance t1 = createTaskInstance(task1, taskName1, ssp1, offsetManager, consumerMultiplexer);
+    RunLoopTask task0 = getMockRunLoopTask(taskName0, ssp0);
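+    // task0 requests a commit scoped to CURRENT_TASK, so task1 is never expected to commit.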
+    doAnswer(invocation -> {
+      ReadableCoordinator coordinator = invocation.getArgumentAt(1, ReadableCoordinator.class);
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+      TaskCallback callback = callbackFactory.createCallback();
 
-    Map<TaskName, TaskInstance> tasks = new HashMap<>();
-    tasks.put(taskName0, t0);
-    tasks.put(taskName1, t1);
+      coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK);
+      coordinator.shutdown(TaskCoordinator.RequestScope.ALL_TASKS_IN_CONTAINER);
+
+      callback.complete();
+      return null;
+    }).when(task0).process(eq(envelope00), any(), any());
+
+    RunLoopTask task1 = getMockRunLoopTask(taskName1, ssp1);
+
+    Map<TaskName, RunLoopTask> tasks = new HashMap<>();
+    tasks.put(taskName0, task0);
+    tasks.put(taskName1, task1);
 
     int maxMessagesInFlight = 1;
     RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
                                             callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
-    //have a null message in between to make sure task0 finishes processing and invoke the commit
+    // deliver one envelope to each task; task0 requests the commit from within its process call
-    when(consumerMultiplexer.choose(false)).thenReturn(envelope0)
-        .thenAnswer(x -> {
-            task0ProcessedMessagesLatch.await();
-            return null;
-          }).thenReturn(envelope1).thenReturn(null);
+    when(consumerMultiplexer.choose(false)).thenReturn(envelope00).thenReturn(envelope11).thenReturn(null);
 
     runLoop.run();
 
-    task0ProcessedMessagesLatch.await();
-    task1ProcessedMessagesLatch.await();
+    verify(task0).process(any(), any(), any());
+    verify(task1).process(any(), any(), any());
 
-    verify(offsetManager).buildCheckpoint(eq(taskName0));
-    verify(offsetManager).writeCheckpoint(eq(taskName0), any(Checkpoint.class));
-    verify(offsetManager, never()).buildCheckpoint(eq(taskName1));
-    verify(offsetManager, never()).writeCheckpoint(eq(taskName1), any(Checkpoint.class));
+    verify(task0).commit();
+    verify(task1, never()).commit();
   }
 
   @Test
-  public void testCommitAllTasks() throws Exception {
-    CountDownLatch task0ProcessedMessagesLatch = new CountDownLatch(1);
-    CountDownLatch task1ProcessedMessagesLatch = new CountDownLatch(1);
-
+  public void testCommitAllTasks() {
     SystemConsumers consumerMultiplexer = mock(SystemConsumers.class);
-    when(consumerMultiplexer.pollIntervalMs()).thenReturn(10);
-    OffsetManager offsetManager = mock(OffsetManager.class);
 
-    TestTask task0 = new TestTask(true, true, false, task0ProcessedMessagesLatch);
-    task0.setCommitRequest(TaskCoordinator.RequestScope.ALL_TASKS_IN_CONTAINER);
-    TestTask task1 = new TestTask(true, false, true, task1ProcessedMessagesLatch);
+    RunLoopTask task0 = getMockRunLoopTask(taskName0, ssp0);
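+    // task0 requests a commit scoped to ALL_TASKS_IN_CONTAINER, so task1 is expected to commit as well.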
+    doAnswer(invocation -> {
+      ReadableCoordinator coordinator = invocation.getArgumentAt(1, ReadableCoordinator.class);
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+      TaskCallback callback = callbackFactory.createCallback();
 
-    TaskInstance t0 = createTaskInstance(task0, taskName0, ssp0, offsetManager, consumerMultiplexer);
-    TaskInstance t1 = createTaskInstance(task1, taskName1, ssp1, offsetManager, consumerMultiplexer);
+      coordinator.commit(TaskCoordinator.RequestScope.ALL_TASKS_IN_CONTAINER);
+      coordinator.shutdown(TaskCoordinator.RequestScope.ALL_TASKS_IN_CONTAINER);
 
-    Map<TaskName, TaskInstance> tasks = new HashMap<>();
-    tasks.put(taskName0, t0);
-    tasks.put(taskName1, t1);
+      callback.complete();
+      return null;
+    }).when(task0).process(eq(envelope00), any(), any());
+
+    RunLoopTask task1 = getMockRunLoopTask(taskName1, ssp1);
+
+    Map<TaskName, RunLoopTask> tasks = new HashMap<>();
+    tasks.put(taskName0, task0);
+    tasks.put(taskName1, task1);
+
     int maxMessagesInFlight = 1;
     RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
-                                            callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
+        callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
-    //have a null message in between to make sure task0 finishes processing and invoke the commit
+    // deliver one envelope to each task; task0 requests the commit from within its process call
-    when(consumerMultiplexer.choose(false)).thenReturn(envelope0)
-        .thenAnswer(x -> {
-            task0ProcessedMessagesLatch.await();
-            return null;
-          }).thenReturn(envelope1).thenReturn(null);
+    when(consumerMultiplexer.choose(false)).thenReturn(envelope00).thenReturn(envelope11).thenReturn(null);
+
     runLoop.run();
 
-    task0ProcessedMessagesLatch.await();
-    task1ProcessedMessagesLatch.await();
+    verify(task0).process(any(), any(), any());
+    verify(task1).process(any(), any(), any());
 
-    verify(offsetManager).buildCheckpoint(eq(taskName0));
-    verify(offsetManager).writeCheckpoint(eq(taskName0), any(Checkpoint.class));
-    verify(offsetManager).buildCheckpoint(eq(taskName1));
-    verify(offsetManager).writeCheckpoint(eq(taskName1), any(Checkpoint.class));
+    verify(task0).commit();
+    verify(task1).commit();
   }
 
   @Test
-  public void testShutdownOnConsensus() throws Exception {
-    CountDownLatch task0ProcessedMessagesLatch = new CountDownLatch(1);
-    CountDownLatch task1ProcessedMessagesLatch = new CountDownLatch(1);
-
+  public void testShutdownOnConsensus() {
     SystemConsumers consumerMultiplexer = mock(SystemConsumers.class);
-    when(consumerMultiplexer.pollIntervalMs()).thenReturn(10);
-    OffsetManager offsetManager = mock(OffsetManager.class);
-
-    TestTask task0 = new TestTask(true, true, true, task0ProcessedMessagesLatch);
-    task0.setShutdownRequest(TaskCoordinator.RequestScope.CURRENT_TASK);
-    TestTask task1 = new TestTask(true, false, true, task1ProcessedMessagesLatch);
-    task1.setShutdownRequest(TaskCoordinator.RequestScope.CURRENT_TASK);
-    TaskInstance t0 = createTaskInstance(task0, taskName0, ssp0, offsetManager, consumerMultiplexer);
-    TaskInstance t1 = createTaskInstance(task1, taskName1, ssp1, offsetManager, consumerMultiplexer);
-
-    Map<TaskName, TaskInstance> tasks = new HashMap<>();
-    tasks.put(taskName0, t0);
-    tasks.put(taskName1, t1);
-
-    tasks.put(taskName0, createTaskInstance(task0, taskName0, ssp0, offsetManager, consumerMultiplexer));
-    tasks.put(taskName1, createTaskInstance(task1, taskName1, ssp1, offsetManager, consumerMultiplexer));
 
     int maxMessagesInFlight = 1;
+    RunLoopTask task0 = getMockRunLoopTask(taskName0, ssp0);
+    doAnswer(invocation -> {
+      ReadableCoordinator coordinator = invocation.getArgumentAt(1, ReadableCoordinator.class);
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+
+      TaskCallback callback = callbackFactory.createCallback();
+      coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
+      callback.complete();
+      return null;
+    }).when(task0).process(eq(envelope00), any(), any());
+
+    RunLoopTask task1 = getMockRunLoopTask(taskName1, ssp1);
+    doAnswer(invocation -> {
+      ReadableCoordinator coordinator = invocation.getArgumentAt(1, ReadableCoordinator.class);
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+
+      TaskCallback callback = callbackFactory.createCallback();
+      coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
+      callback.complete();
+      return null;
+    }).when(task1).process(eq(envelope11), any(), any());
+
+    Map<TaskName, RunLoopTask> tasks = new HashMap<>();
+    tasks.put(taskName0, task0);
+    tasks.put(taskName1, task1);
 
     RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
-                                            callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics,
-                                            () -> 0L, false);
+        callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
-    // consensus is reached after envelope1 is processed.
+    // shutdown consensus is reached after envelope11 is processed.
-    when(consumerMultiplexer.choose(false)).thenReturn(envelope0).thenReturn(envelope1).thenReturn(null);
+    when(consumerMultiplexer.choose(false)).thenReturn(envelope00).thenReturn(envelope11).thenReturn(null);
     runLoop.run();
 
-    task0ProcessedMessagesLatch.await();
-    task1ProcessedMessagesLatch.await();
+    verify(task0).process(any(), any(), any());
+    verify(task1).process(any(), any(), any());
 
-    assertEquals(1, task0.processed);
-    assertEquals(1, task0.completed.get());
-    assertEquals(1, task1.processed);
-    assertEquals(1, task1.completed.get());
     assertEquals(2L, containerMetrics.envelopes().getCount());
     assertEquals(2L, containerMetrics.processes().getCount());
   }
 
   @Test
-  public void testEndOfStreamWithMultipleTasks() throws Exception {
-    CountDownLatch task0ProcessedMessagesLatch = new CountDownLatch(1);
-    CountDownLatch task1ProcessedMessagesLatch = new CountDownLatch(1);
-
+  public void testEndOfStreamWithMultipleTasks() {
     SystemConsumers consumerMultiplexer = mock(SystemConsumers.class);
-    when(consumerMultiplexer.pollIntervalMs()).thenReturn(10);
-    OffsetManager offsetManager = mock(OffsetManager.class);
 
-    TestTask task0 = new TestTask(true, true, false, task0ProcessedMessagesLatch);
-    TestTask task1 = new TestTask(true, true, false, task1ProcessedMessagesLatch);
-    TaskInstance t0 = createTaskInstance(task0, taskName0, ssp0, offsetManager, consumerMultiplexer);
-    TaskInstance t1 = createTaskInstance(task1, taskName1, ssp1, offsetManager, consumerMultiplexer);
+    RunLoopTask task0 = getMockRunLoopTask(taskName0, ssp0);
+    RunLoopTask task1 = getMockRunLoopTask(taskName1, ssp1);
 
-    Map<TaskName, TaskInstance> tasks = new HashMap<>();
+    Map<TaskName, RunLoopTask> tasks = new HashMap<>();
 
-    tasks.put(taskName0, t0);
-    tasks.put(taskName1, t1);
+    tasks.put(taskName0, task0);
+    tasks.put(taskName1, task1);
 
     int maxMessagesInFlight = 1;
     RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
-                                            callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics,
-                                            () -> 0L, false);
+        callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
     when(consumerMultiplexer.choose(false))
-      .thenReturn(envelope0)
-      .thenReturn(envelope1)
+      .thenReturn(envelope00)
+      .thenReturn(envelope11)
       .thenReturn(ssp0EndOfStream)
       .thenReturn(ssp1EndOfStream)
       .thenReturn(null);
 
     runLoop.run();
 
-    task0ProcessedMessagesLatch.await();
-    task1ProcessedMessagesLatch.await();
+    verify(task0).process(eq(envelope00), any(), any());
+    verify(task0).endOfStream(any());
 
-    assertEquals(1, task0.processed);
-    assertEquals(1, task0.completed.get());
-    assertEquals(1, task1.processed);
-    assertEquals(1, task1.completed.get());
+    verify(task1).process(eq(envelope11), any(), any());
+    verify(task1).endOfStream(any());
+
     assertEquals(4L, containerMetrics.envelopes().getCount());
-    assertEquals(2L, containerMetrics.processes().getCount());
   }
 
   @Test
-  public void testEndOfStreamWithOutOfOrderProcess() throws Exception {
+  public void testEndOfStreamWaitsForInFlightMessages() {
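+    // envelope00's callback is held back until the end-of-stream envelope has been chosen, verifying that
+    // endOfStream() only fires after every in-flight callback for the task has completed.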
     int maxMessagesInFlight = 2;
-
-    CountDownLatch task0ProcessedMessagesLatch = new CountDownLatch(2);
-    CountDownLatch task1ProcessedMessagesLatch = new CountDownLatch(1);
-
+    ExecutorService taskExecutor = Executors.newFixedThreadPool(1);
     SystemConsumers consumerMultiplexer = mock(SystemConsumers.class);
-    when(consumerMultiplexer.pollIntervalMs()).thenReturn(10);
     OffsetManager offsetManager = mock(OffsetManager.class);
 
-    TestTask task0 = new TestTask(true, true, false, task0ProcessedMessagesLatch, maxMessagesInFlight);
-    TestTask task1 = new TestTask(true, true, false, task1ProcessedMessagesLatch, maxMessagesInFlight);
-    TaskInstance t0 = createTaskInstance(task0, taskName0, ssp0, offsetManager, consumerMultiplexer);
-    TaskInstance t1 = createTaskInstance(task1, taskName1, ssp1, offsetManager, consumerMultiplexer);
+    RunLoopTask task0 = getMockRunLoopTask(taskName0, ssp0);
+    when(task0.offsetManager()).thenReturn(offsetManager);
+    CountDownLatch firstMessageBarrier = new CountDownLatch(2);
+    doAnswer(invocation -> {
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+      TaskCallback callback = callbackFactory.createCallback();
+      taskExecutor.submit(() -> {
+        firstMessageBarrier.await();
+        callback.complete();
+        return null;
+      });
+      return null;
+    }).when(task0).process(eq(envelope00), any(), any());
 
-    Map<TaskName, TaskInstance> tasks = new HashMap<>();
+    doAnswer(invocation -> {
+      assertEquals(1, task0.metrics().messagesInFlight().getValue());
 
-    tasks.put(taskName0, t0);
-    tasks.put(taskName1, t1);
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+      TaskCallback callback = callbackFactory.createCallback();
+      callback.complete();
+      firstMessageBarrier.countDown();
+      return null;
+    }).when(task0).process(eq(envelope01), any(), any());
 
-    task0.callbackHandler = buildOutofOrderCallback(task0);
+    doAnswer(invocation -> {
+      assertEquals(0, task0.metrics().messagesInFlight().getValue());
+      assertEquals(2, task0.metrics().asyncCallbackCompleted().getCount());
+
+      return null;
+    }).when(task0).endOfStream(any());
+
+    Map<TaskName, RunLoopTask> tasks = new HashMap<>();
+    tasks.put(taskName0, task0);
+
     RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
                                             callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
-    when(consumerMultiplexer.choose(false)).thenReturn(envelope0).thenReturn(envelope3).thenReturn(envelope1).thenReturn(null).thenReturn(ssp0EndOfStream).thenReturn(ssp1EndOfStream).thenReturn(null);
+    when(consumerMultiplexer.choose(false)).thenReturn(envelope00).thenReturn(envelope01).thenReturn(ssp0EndOfStream)
+        .thenAnswer(invocation -> {
+          // this ensures that the end-of-stream message has passed through the run loop BEFORE the
+          // last remaining in-flight message completes
+          firstMessageBarrier.countDown();
+          return null;
+        });
 
     runLoop.run();
 
-    task0ProcessedMessagesLatch.await();
-    task1ProcessedMessagesLatch.await();
-
-    assertEquals(2, task0.processed);
-    assertEquals(2, task0.completed.get());
-    assertEquals(1, task1.processed);
-    assertEquals(1, task1.completed.get());
-    assertEquals(5L, containerMetrics.envelopes().getCount());
-    assertEquals(3L, containerMetrics.processes().getCount());
+    verify(task0).endOfStream(any());
   }
 
   @Test
-  public void testEndOfStreamCommitBehavior() throws Exception {
-    CountDownLatch task0ProcessedMessagesLatch = new CountDownLatch(1);
-    CountDownLatch task1ProcessedMessagesLatch = new CountDownLatch(1);
-
+  public void testEndOfStreamCommitBehavior() {
     SystemConsumers consumerMultiplexer = mock(SystemConsumers.class);
-    when(consumerMultiplexer.pollIntervalMs()).thenReturn(10);
-    OffsetManager offsetManager = mock(OffsetManager.class);
 
-    //explicitly configure to disable commits inside process or window calls and invoke commit from end of stream
-    TestTask task0 = new TestTask(true, false, false, task0ProcessedMessagesLatch);
-    TestTask task1 = new TestTask(true, false, false, task1ProcessedMessagesLatch);
+    RunLoopTask task0 = getMockRunLoopTask(taskName0, ssp0);
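+    // the commit requested from within endOfStream() should execute after endOfStream() itself returns.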
+    doAnswer(invocation -> {
+      ReadableCoordinator coordinator = invocation.getArgumentAt(0, ReadableCoordinator.class);
 
-    TaskInstance t0 = createTaskInstance(task0, taskName0, ssp0, offsetManager, consumerMultiplexer);
-    TaskInstance t1 = createTaskInstance(task1, taskName1, ssp1, offsetManager, consumerMultiplexer);
+      coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK);
+      return null;
+    }).when(task0).endOfStream(any());
 
-    Map<TaskName, TaskInstance> tasks = new HashMap<>();
+    Map<TaskName, RunLoopTask> tasks = new HashMap<>();
 
-    tasks.put(taskName0, t0);
-    tasks.put(taskName1, t1);
+    tasks.put(taskName0, task0);
+
     int maxMessagesInFlight = 1;
     RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
                                             callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
-    when(consumerMultiplexer.choose(false)).thenReturn(envelope0).thenReturn(envelope1).thenReturn(null).thenReturn(ssp0EndOfStream).thenReturn(ssp1EndOfStream).thenReturn(null);
+    when(consumerMultiplexer.choose(false)).thenReturn(envelope00).thenReturn(ssp0EndOfStream).thenReturn(null);
 
     runLoop.run();
 
-    task0ProcessedMessagesLatch.await();
-    task1ProcessedMessagesLatch.await();
+    InOrder inOrder = inOrder(task0);
 
-    verify(offsetManager).buildCheckpoint(eq(taskName0));
-    verify(offsetManager).writeCheckpoint(eq(taskName0), any(Checkpoint.class));
-    verify(offsetManager).buildCheckpoint(eq(taskName1));
-    verify(offsetManager).writeCheckpoint(eq(taskName1), any(Checkpoint.class));
+    inOrder.verify(task0).endOfStream(any());
+    inOrder.verify(task0).commit();
   }
 
   @Test
-  public void testEndOfStreamOffsetManagement() throws Exception {
-    //explicitly configure to disable commits inside process or window calls and invoke commit from end of stream
-    TestTask mockStreamTask1 = new TestTask(true, false, false, null);
-    TestTask mockStreamTask2 = new TestTask(true, false, false, null);
-
-    Partition p1 = new Partition(1);
-    Partition p2 = new Partition(2);
-    SystemStreamPartition ssp1 = new SystemStreamPartition("system1", "stream1", p1);
-    SystemStreamPartition ssp2 = new SystemStreamPartition("system1", "stream2", p2);
-    IncomingMessageEnvelope envelope1 = new IncomingMessageEnvelope(ssp2, "1", "key1", "message1");
-    IncomingMessageEnvelope envelope2 = new IncomingMessageEnvelope(ssp2, "2", "key1", "message1");
-    IncomingMessageEnvelope envelope3 = IncomingMessageEnvelope.buildEndOfStreamEnvelope(ssp2);
-
-    Map<SystemStreamPartition, List<IncomingMessageEnvelope>> sspMap = new HashMap<>();
-    List<IncomingMessageEnvelope> messageList = new ArrayList<>();
-    messageList.add(envelope1);
-    messageList.add(envelope2);
-    messageList.add(envelope3);
-    sspMap.put(ssp2, messageList);
-
-    SystemConsumer mockConsumer = mock(SystemConsumer.class);
-    when(mockConsumer.poll(anyObject(), anyLong())).thenReturn(sspMap);
-
-    SystemAdmins systemAdmins = Mockito.mock(SystemAdmins.class);
-    Mockito.when(systemAdmins.getSystemAdmin("system1")).thenReturn(Mockito.mock(SystemAdmin.class));
-    Mockito.when(systemAdmins.getSystemAdmin("testSystem")).thenReturn(Mockito.mock(SystemAdmin.class));
-
-    HashMap<String, SystemConsumer> systemConsumerMap = new HashMap<>();
-    systemConsumerMap.put("system1", mockConsumer);
-
-    SystemConsumers consumers = TestSystemConsumers.getSystemConsumers(systemConsumerMap, systemAdmins);
-
-    TaskName taskName1 = new TaskName("task1");
-    TaskName taskName2 = new TaskName("task2");
-
+  public void testCommitWithMessageInFlightWhenAsyncCommitIsEnabled() {
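+    // with async commit enabled, envelope00 requests a commit while envelope01 is still in flight; the
+    // commit observes one completed callback and one in-flight message, then unblocks envelope01.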
+    int maxMessagesInFlight = 2;
+    ExecutorService taskExecutor = Executors.newFixedThreadPool(2);
+    SystemConsumers consumerMultiplexer = mock(SystemConsumers.class);
     OffsetManager offsetManager = mock(OffsetManager.class);
 
-    when(offsetManager.getLastProcessedOffset(taskName1, ssp1)).thenReturn(Option.apply("3"));
-    when(offsetManager.getLastProcessedOffset(taskName2, ssp2)).thenReturn(Option.apply("0"));
-    when(offsetManager.getStartingOffset(taskName1, ssp1)).thenReturn(Option.apply(IncomingMessageEnvelope.END_OF_STREAM_OFFSET));
-    when(offsetManager.getStartingOffset(taskName2, ssp2)).thenReturn(Option.apply("1"));
-    when(offsetManager.getStartpoint(anyObject(), anyObject())).thenReturn(Option.empty());
+    RunLoopTask task0 = getMockRunLoopTask(taskName0, ssp0);
+    when(task0.offsetManager()).thenReturn(offsetManager);
+    CountDownLatch firstMessageBarrier = new CountDownLatch(1);
+    doAnswer(invocation -> {
+      ReadableCoordinator coordinator = invocation.getArgumentAt(1, ReadableCoordinator.class);
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+      TaskCallback callback = callbackFactory.createCallback();
 
-    TaskInstance taskInstance1 = createTaskInstance(mockStreamTask1, taskName1, ssp1, offsetManager, consumers);
-    TaskInstance taskInstance2 = createTaskInstance(mockStreamTask2, taskName2, ssp2, offsetManager, consumers);
-    Map<TaskName, TaskInstance> tasks = new HashMap<>();
-    tasks.put(taskName1, taskInstance1);
-    tasks.put(taskName2, taskInstance2);
+      taskExecutor.submit(() -> {
+        firstMessageBarrier.await();
+        coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK);
+        callback.complete();
+        return null;
+      });
+      return null;
+    }).when(task0).process(eq(envelope00), any(), any());
 
-    taskInstance1.registerConsumers();
-    taskInstance2.registerConsumers();
-    consumers.start();
+    CountDownLatch secondMessageBarrier = new CountDownLatch(1);
+    doAnswer(invocation -> {
+      ReadableCoordinator coordinator = invocation.getArgumentAt(1, ReadableCoordinator.class);
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+      TaskCallback callback = callbackFactory.createCallback();
+
+      taskExecutor.submit(() -> {
+        // let the first message proceed to ask for a commit
+        firstMessageBarrier.countDown();
+        // block this message until commit is executed
+        secondMessageBarrier.await();
+        coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
+        callback.complete();
+        return null;
+      });
+      return null;
+    }).when(task0).process(eq(envelope01), any(), any());
+
+    doAnswer(invocation -> {
+      assertEquals(1, task0.metrics().asyncCallbackCompleted().getCount());
+      assertEquals(1, task0.metrics().messagesInFlight().getValue());
+
+      secondMessageBarrier.countDown();
+      return null;
+    }).when(task0).commit();
+
+    Map<TaskName, RunLoopTask> tasks = new HashMap<>();
+    tasks.put(taskName0, task0);
+
+    RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
+        callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, true);
+    when(consumerMultiplexer.choose(false)).thenReturn(envelope00).thenReturn(envelope01).thenReturn(null);
+    runLoop.run();
+
+    InOrder inOrder = inOrder(task0);
+    inOrder.verify(task0).process(eq(envelope00), any(), any());
+    inOrder.verify(task0).process(eq(envelope01), any(), any());
+    inOrder.verify(task0).commit();
+  }
+
+  @Test(expected = SamzaException.class)
+  public void testExceptionIsPropagated() {
+    SystemConsumers consumerMultiplexer = mock(SystemConsumers.class);
+
+    RunLoopTask task0 = getMockRunLoopTask(taskName0, ssp0);
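+    // failing the task callback should surface from runLoop.run() as a SamzaException.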
+    doAnswer(invocation -> {
+      TaskCallbackFactory callbackFactory = invocation.getArgumentAt(2, TaskCallbackFactory.class);
+      callbackFactory.createCallback().failure(new Exception("Intentional failure"));
+      return null;
+    }).when(task0).process(eq(envelope00), any(), any());
+
+    Map<TaskName, RunLoopTask> tasks = ImmutableMap.of(taskName0, task0);
 
     int maxMessagesInFlight = 1;
-    RunLoop runLoop = new RunLoop(tasks, executor, consumers, maxMessagesInFlight, windowMs, commitMs,
-                                            callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
+    RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
+        callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
+
+    when(consumerMultiplexer.choose(false))
+        .thenReturn(envelope00)
+        .thenReturn(ssp0EndOfStream)
+        .thenReturn(null);
 
     runLoop.run();
   }
 
-  //@Test
-  public void testCommitBehaviourWhenAsyncCommitIsEnabled() throws InterruptedException {
-    SystemConsumers consumerMultiplexer = mock(SystemConsumers.class);
-    when(consumerMultiplexer.pollIntervalMs()).thenReturn(10);
-    OffsetManager offsetManager = mock(OffsetManager.class);
-
-    int maxMessagesInFlight = 3;
-    TestTask task0 = new TestTask(true, true, false, null, maxMessagesInFlight);
-    task0.setCommitRequest(TaskCoordinator.RequestScope.CURRENT_TASK);
-    TestTask task1 = new TestTask(true, false, false, null, maxMessagesInFlight);
-
-    IncomingMessageEnvelope firstMsg = new IncomingMessageEnvelope(ssp0, "0", "key0", "value0");
-    IncomingMessageEnvelope secondMsg = new IncomingMessageEnvelope(ssp0, "1", "key1", "value1");
-    IncomingMessageEnvelope thirdMsg = new IncomingMessageEnvelope(ssp0, "2", "key0", "value0");
-
-    final CountDownLatch firstMsgCompletionLatch = new CountDownLatch(1);
-    final CountDownLatch secondMsgCompletionLatch = new CountDownLatch(1);
-    task0.callbackHandler = callback -> {
-      IncomingMessageEnvelope envelope = ((TaskCallbackImpl) callback).getEnvelope();
-      try {
-        if (envelope.equals(firstMsg)) {
-          firstMsgCompletionLatch.await();
-        } else if (envelope.equals(secondMsg)) {
-          firstMsgCompletionLatch.countDown();
-          secondMsgCompletionLatch.await();
-        } else if (envelope.equals(thirdMsg)) {
-          secondMsgCompletionLatch.countDown();
-          // OffsetManager.update with firstMsg offset, task.commit has happened when second message callback has not completed.
-          verify(offsetManager).update(eq(taskName0), eq(firstMsg.getSystemStreamPartition()), eq(firstMsg.getOffset()));
-        }
-      } catch (Exception e) {
-        e.printStackTrace();
-      }
-    };
-
-    Map<TaskName, TaskInstance> tasks = new HashMap<>();
-
-    tasks.put(taskName0, createTaskInstance(task0, taskName0, ssp0, offsetManager, consumerMultiplexer));
-    tasks.put(taskName1, createTaskInstance(task1, taskName1, ssp1, offsetManager, consumerMultiplexer));
-    when(consumerMultiplexer.choose(false)).thenReturn(firstMsg).thenReturn(secondMsg).thenReturn(thirdMsg).thenReturn(envelope1).thenReturn(ssp0EndOfStream).thenReturn(ssp1EndOfStream).thenReturn(null);
-
-    RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
-                                            callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics, () -> 0L, false);
-
-    runLoop.run();
-
-    firstMsgCompletionLatch.await();
-    secondMsgCompletionLatch.await();
-
-    verify(offsetManager, atLeastOnce()).buildCheckpoint(eq(taskName0));
-    verify(offsetManager, atLeastOnce()).writeCheckpoint(eq(taskName0), any(Checkpoint.class));
-    assertEquals(3, task0.processed);
-    assertEquals(3, task0.committed);
-    assertEquals(1, task1.processed);
-    assertEquals(0, task1.committed);
-  }
-
-  @Test
-  public void testProcessBehaviourWhenAsyncCommitIsEnabled() throws InterruptedException {
-    int maxMessagesInFlight = 2;
-
-    SystemConsumers consumerMultiplexer = mock(SystemConsumers.class);
-    when(consumerMultiplexer.pollIntervalMs()).thenReturn(10);
-    OffsetManager offsetManager = mock(OffsetManager.class);
-
-    TestTask task0 = new TestTask(true, true, false, null, maxMessagesInFlight);
-    CountDownLatch commitLatch = new CountDownLatch(1);
-    task0.commitHandler = callback -> {
-      TaskCallbackImpl taskCallback = (TaskCallbackImpl) callback;
-      if (taskCallback.getEnvelope().equals(envelope3)) {
-        try {
-          commitLatch.await();
-        } catch (InterruptedException e) {
-          e.printStackTrace();
-        }
-      }
-    };
-
-    task0.callbackHandler = callback -> {
-      TaskCallbackImpl taskCallback = (TaskCallbackImpl) callback;
-      if (taskCallback.getEnvelope().equals(envelope0)) {
-        // Both the process call has gone through when the first commit is in progress.
-        assertEquals(2, containerMetrics.processes().getCount());
-        assertEquals(0, containerMetrics.commits().getCount());
-        commitLatch.countDown();
-      }
-    };
-
-    Map<TaskName, TaskInstance> tasks = new HashMap<>();
-
-    tasks.put(taskName0, createTaskInstance(task0, taskName0, ssp0, offsetManager, consumerMultiplexer));
-    when(consumerMultiplexer.choose(false)).thenReturn(envelope3).thenReturn(envelope0).thenReturn(ssp0EndOfStream).thenReturn(null);
-    RunLoop runLoop = new RunLoop(tasks, executor, consumerMultiplexer, maxMessagesInFlight, windowMs, commitMs,
-                                            callbackTimeoutMs, maxThrottlingDelayMs, maxIdleMs, containerMetrics,
-                                            () -> 0L, true);
-
-    runLoop.run();
-
-    commitLatch.await();
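+  // builds a mock RunLoopTask bound to a single SSP, with fresh task metrics and the given task name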
+  private RunLoopTask getMockRunLoopTask(TaskName taskName, SystemStreamPartition ssp0) {
+    RunLoopTask task0 = mock(RunLoopTask.class);
+    when(task0.systemStreamPartitions()).thenReturn(Collections.singleton(ssp0));
+    when(task0.metrics()).thenReturn(new TaskInstanceMetrics("test", new MetricsRegistryMap()));
+    when(task0.taskName()).thenReturn(taskName);
+    return task0;
   }
 }
diff --git a/samza-core/src/test/java/org/apache/samza/diagnostics/TestDiagnosticsManager.java b/samza-core/src/test/java/org/apache/samza/diagnostics/TestDiagnosticsManager.java
index d21bb4b..6429a54 100644
--- a/samza-core/src/test/java/org/apache/samza/diagnostics/TestDiagnosticsManager.java
+++ b/samza-core/src/test/java/org/apache/samza/diagnostics/TestDiagnosticsManager.java
@@ -44,6 +44,7 @@
 public class TestDiagnosticsManager {
   private DiagnosticsManager diagnosticsManager;
   private MockSystemProducer mockSystemProducer;
+  private ScheduledExecutorService mockExecutorService;
   private SystemStream diagnosticsSystemStream = new SystemStream("kafka", "test stream");
 
   private String jobName = "Testjob";
@@ -68,13 +69,13 @@
     mockSystemProducer = new MockSystemProducer();
 
     // Mocked scheduled executor service which does a synchronous run() on scheduling
-    ScheduledExecutorService mockExecutorService = Mockito.mock(ScheduledExecutorService.class);
+    mockExecutorService = Mockito.mock(ScheduledExecutorService.class);
     Mockito.when(mockExecutorService.scheduleWithFixedDelay(Mockito.any(), Mockito.anyLong(), Mockito.anyLong(),
         Mockito.eq(TimeUnit.SECONDS))).thenAnswer(invocation -> {
-            ((Runnable) invocation.getArguments()[0]).run();
-            return Mockito.
-                mock(ScheduledFuture.class);
-          });
+          ((Runnable) invocation.getArguments()[0]).run();
+          return Mockito
+              .mock(ScheduledFuture.class);
+        });
 
     this.diagnosticsManager =
         new DiagnosticsManager(jobName, jobId, containerModels, containerMb, containerNumCores, numPersistentStores, maxHeapSize, containerThreadPoolSize,
@@ -82,12 +83,69 @@
             mockSystemProducer, Duration.ofSeconds(1), mockExecutorService, autosizingEnabled);
 
     exceptionEventList.forEach(
-        diagnosticsExceptionEvent -> this.diagnosticsManager.addExceptionEvent(diagnosticsExceptionEvent));
+      diagnosticsExceptionEvent -> this.diagnosticsManager.addExceptionEvent(diagnosticsExceptionEvent));
 
     this.diagnosticsManager.addProcessorStopEvent("0", executionEnvContainerId, hostname, 101);
   }
 
   @Test
+  public void testDiagnosticsManagerStart() {
+    SystemProducer mockSystemProducer = Mockito.mock(SystemProducer.class);
+    DiagnosticsManager diagnosticsManager =
+        new DiagnosticsManager(jobName, jobId, containerModels, containerMb, containerNumCores, numPersistentStores,
+            maxHeapSize, containerThreadPoolSize, "0", executionEnvContainerId, taskClassVersion, samzaVersion,
+            hostname, diagnosticsSystemStream, mockSystemProducer, Duration.ofSeconds(1), mockExecutorService,
+            autosizingEnabled);
+
+    diagnosticsManager.start();
+
+    Mockito.verify(mockSystemProducer, Mockito.times(1)).start();
+    Mockito.verify(mockExecutorService, Mockito.times(1))
+        .scheduleWithFixedDelay(Mockito.any(Runnable.class), Mockito.anyLong(), Mockito.anyLong(),
+            Mockito.any(TimeUnit.class));
+  }
+
+  @Test
+  public void testDiagnosticsManagerStop() throws InterruptedException {
+    SystemProducer mockSystemProducer = Mockito.mock(SystemProducer.class);
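+    // the executor reports that it terminated within the grace period, so shutdownNow() must not be called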
+    Mockito.when(mockExecutorService.isTerminated()).thenReturn(true);
+    Duration terminationDuration = Duration.ofSeconds(1);
+    DiagnosticsManager diagnosticsManager =
+        new DiagnosticsManager(jobName, jobId, containerModels, containerMb, containerNumCores, numPersistentStores,
+            maxHeapSize, containerThreadPoolSize, "0", executionEnvContainerId, taskClassVersion, samzaVersion,
+            hostname, diagnosticsSystemStream, mockSystemProducer, terminationDuration, mockExecutorService,
+            autosizingEnabled);
+
+    diagnosticsManager.stop();
+
+    Mockito.verify(mockExecutorService, Mockito.times(1)).shutdown();
+    Mockito.verify(mockExecutorService, Mockito.times(1))
+        .awaitTermination(terminationDuration.toMillis(), TimeUnit.MILLISECONDS);
+    Mockito.verify(mockExecutorService, Mockito.never()).shutdownNow();
+    Mockito.verify(mockSystemProducer, Mockito.times(1)).stop();
+  }
+
+  @Test
+  public void testDiagnosticsManagerForceStop() throws InterruptedException {
+    SystemProducer mockSystemProducer = Mockito.mock(SystemProducer.class);
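+    // the executor reports that it did not terminate within the grace period, so stop() escalates to shutdownNow()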
+    Mockito.when(mockExecutorService.isTerminated()).thenReturn(false);
+    Duration terminationDuration = Duration.ofSeconds(1);
+    DiagnosticsManager diagnosticsManager =
+        new DiagnosticsManager(jobName, jobId, containerModels, containerMb, containerNumCores, numPersistentStores,
+            maxHeapSize, containerThreadPoolSize, "0", executionEnvContainerId, taskClassVersion, samzaVersion,
+            hostname, diagnosticsSystemStream, mockSystemProducer, terminationDuration, mockExecutorService,
+            autosizingEnabled);
+
+    diagnosticsManager.stop();
+
+    Mockito.verify(mockExecutorService, Mockito.times(1)).shutdown();
+    Mockito.verify(mockExecutorService, Mockito.times(1))
+        .awaitTermination(terminationDuration.toMillis(), TimeUnit.MILLISECONDS);
+    Mockito.verify(mockExecutorService, Mockito.times(1)).shutdownNow();
+    Mockito.verify(mockSystemProducer, Mockito.times(1)).stop();
+  }
+
+  @Test
   public void testDiagnosticsStreamFirstMessagePublish() {
     // invoking start will do a synchronous publish to the stream because of our mocked scheduled exec service
     this.diagnosticsManager.start();
diff --git a/samza-core/src/test/java/org/apache/samza/execution/TestExecutionPlanner.java b/samza-core/src/test/java/org/apache/samza/execution/TestExecutionPlanner.java
index 63d290a..c25c265 100644
--- a/samza-core/src/test/java/org/apache/samza/execution/TestExecutionPlanner.java
+++ b/samza-core/src/test/java/org/apache/samza/execution/TestExecutionPlanner.java
@@ -144,13 +144,13 @@
      *
      */
     return new StreamApplicationDescriptorImpl(appDesc-> {
-        MessageStream<KV<Object, Object>> input1 = appDesc.getInputStream(input1Descriptor);
-        OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
-        input1
-            .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
-            .map(kv -> kv)
-            .sendTo(output1);
-      }, config);
+      MessageStream<KV<Object, Object>> input1 = appDesc.getInputStream(input1Descriptor);
+      OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
+      input1
+          .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
+          .map(kv -> kv)
+          .sendTo(output1);
+    }, config);
   }
 
   private StreamApplicationDescriptorImpl createStreamGraphWithStreamStreamJoin() {
@@ -166,30 +166,30 @@
      *
      */
     return new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<KV<Object, Object>> messageStream1 =
-            appDesc.getInputStream(input1Descriptor)
-                .map(m -> m);
-        MessageStream<KV<Object, Object>> messageStream2 =
-            appDesc.getInputStream(input2Descriptor)
-                .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
-                .filter(m -> true);
-        MessageStream<KV<Object, Object>> messageStream3 =
-            appDesc.getInputStream(input3Descriptor)
-                .filter(m -> true)
-                .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p2")
-                .map(m -> m);
-        OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
-        OutputStream<KV<Object, Object>> output2 = appDesc.getOutputStream(output2Descriptor);
+      MessageStream<KV<Object, Object>> messageStream1 =
+          appDesc.getInputStream(input1Descriptor)
+              .map(m -> m);
+      MessageStream<KV<Object, Object>> messageStream2 =
+          appDesc.getInputStream(input2Descriptor)
+              .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
+              .filter(m -> true);
+      MessageStream<KV<Object, Object>> messageStream3 =
+          appDesc.getInputStream(input3Descriptor)
+              .filter(m -> true)
+              .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p2")
+              .map(m -> m);
+      OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
+      OutputStream<KV<Object, Object>> output2 = appDesc.getOutputStream(output2Descriptor);
 
-        messageStream1
-            .join(messageStream2,
-                mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(2), "j1")
-            .sendTo(output1);
-        messageStream3
-            .join(messageStream2,
-                mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
-            .sendTo(output2);
-      }, config);
+      messageStream1
+          .join(messageStream2,
+              mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(2), "j1")
+          .sendTo(output1);
+      messageStream3
+          .join(messageStream2,
+              mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
+          .sendTo(output2);
+    }, config);
   }
 
   private StreamApplicationDescriptorImpl createStreamGraphWithInvalidStreamStreamJoin() {
@@ -204,45 +204,45 @@
      *   input3 (32) --
      */
     return new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
-        MessageStream<KV<Object, Object>> messageStream3 = appDesc.getInputStream(input3Descriptor);
-        OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
+      MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
+      MessageStream<KV<Object, Object>> messageStream3 = appDesc.getInputStream(input3Descriptor);
+      OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
 
-        messageStream1
-            .join(messageStream3,
-                mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(2), "j1")
-            .sendTo(output1);
-      }, config);
+      messageStream1
+          .join(messageStream3,
+              mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(2), "j1")
+          .sendTo(output1);
+    }, config);
   }
 
   private StreamApplicationDescriptorImpl createStreamGraphWithJoinAndWindow() {
 
     return new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor).map(m -> m);
-        MessageStream<KV<Object, Object>> messageStream2 =
-          appDesc.getInputStream(input2Descriptor)
-              .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
-              .filter(m -> true);
-        MessageStream<KV<Object, Object>> messageStream3 =
-          appDesc.getInputStream(input3Descriptor)
-              .filter(m -> true)
-              .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p2")
-              .map(m -> m);
-        OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
-        OutputStream<KV<Object, Object>> output2 = appDesc.getOutputStream(output2Descriptor);
-
-        messageStream1.map(m -> m)
+      MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor).map(m -> m);
+      MessageStream<KV<Object, Object>> messageStream2 =
+        appDesc.getInputStream(input2Descriptor)
+            .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
+            .filter(m -> true);
+      MessageStream<KV<Object, Object>> messageStream3 =
+        appDesc.getInputStream(input3Descriptor)
             .filter(m -> true)
-            .window(Windows.keyedTumblingWindow(m -> m, Duration.ofMillis(8), (Serde<KV<Object, Object>>) mock(Serde.class), (Serde<KV<Object, Object>>) mock(Serde.class)), "w1");
+            .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p2")
+            .map(m -> m);
+      OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
+      OutputStream<KV<Object, Object>> output2 = appDesc.getOutputStream(output2Descriptor);
 
-        messageStream2.map(m -> m)
-            .filter(m -> true)
-            .window(Windows.keyedTumblingWindow(m -> m, Duration.ofMillis(16), (Serde<KV<Object, Object>>) mock(Serde.class), (Serde<KV<Object, Object>>) mock(Serde.class)), "w2");
+      messageStream1.map(m -> m)
+          .filter(m -> true)
+          .window(Windows.keyedTumblingWindow(m -> m, Duration.ofMillis(8), (Serde<KV<Object, Object>>) mock(Serde.class), (Serde<KV<Object, Object>>) mock(Serde.class)), "w1");
 
-        messageStream1.join(messageStream2, mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofMillis(1600), "j1").sendTo(output1);
-        messageStream3.join(messageStream2, mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofMillis(100), "j2").sendTo(output2);
-        messageStream3.join(messageStream2, mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofMillis(252), "j3").sendTo(output2);
-      }, config);
+      messageStream2.map(m -> m)
+          .filter(m -> true)
+          .window(Windows.keyedTumblingWindow(m -> m, Duration.ofMillis(16), (Serde<KV<Object, Object>>) mock(Serde.class), (Serde<KV<Object, Object>>) mock(Serde.class)), "w2");
+
+      messageStream1.join(messageStream2, mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofMillis(1600), "j1").sendTo(output1);
+      messageStream3.join(messageStream2, mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofMillis(100), "j2").sendTo(output2);
+      messageStream3.join(messageStream2, mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofMillis(252), "j3").sendTo(output2);
+    }, config);
   }
 
   private StreamApplicationDescriptorImpl createStreamGraphWithStreamTableJoin() {
@@ -261,26 +261,26 @@
      *
      */
     return new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
-        MessageStream<KV<Object, Object>> messageStream2 = appDesc.getInputStream(input2Descriptor);
-        MessageStream<KV<Object, Object>> messageStream3 = appDesc.getInputStream(input3Descriptor);
-        OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
+      MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
+      MessageStream<KV<Object, Object>> messageStream2 = appDesc.getInputStream(input2Descriptor);
+      MessageStream<KV<Object, Object>> messageStream3 = appDesc.getInputStream(input3Descriptor);
+      OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
 
-        TableDescriptor tableDescriptor = new TestLocalTableDescriptor.MockLocalTableDescriptor(
-            "table-id", new KVSerde(new StringSerde(), new StringSerde()));
-        Table table = appDesc.getTable(tableDescriptor);
+      TableDescriptor tableDescriptor = new TestLocalTableDescriptor.MockLocalTableDescriptor(
+          "table-id", new KVSerde(new StringSerde(), new StringSerde()));
+      Table table = appDesc.getTable(tableDescriptor);
 
-        messageStream2
-            .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
-            .sendTo(table);
+      messageStream2
+          .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
+          .sendTo(table);
 
-        messageStream1
-            .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p2")
-            .join(table, mock(StreamTableJoinFunction.class))
-            .join(messageStream3,
-                  mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
-            .sendTo(output1);
-      }, config);
+      messageStream1
+          .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p2")
+          .join(table, mock(StreamTableJoinFunction.class))
+          .join(messageStream3,
+                mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
+          .sendTo(output1);
+    }, config);
   }
 
   private StreamApplicationDescriptorImpl createStreamGraphWithComplexStreamStreamJoin() {
@@ -305,37 +305,37 @@
      *
      */
     return new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
+      MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
 
-        MessageStream<KV<Object, Object>> messageStream2 =
-            appDesc.getInputStream(input2Descriptor)
-                .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p2");
+      MessageStream<KV<Object, Object>> messageStream2 =
+          appDesc.getInputStream(input2Descriptor)
+              .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p2");
 
-        MessageStream<KV<Object, Object>> messageStream3 =
-            appDesc.getInputStream(input3Descriptor)
-                .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p3");
+      MessageStream<KV<Object, Object>> messageStream3 =
+          appDesc.getInputStream(input3Descriptor)
+              .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p3");
 
-        MessageStream<KV<Object, Object>> messageStream4 =
-            appDesc.getInputStream(input4Descriptor)
-                .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p4");
+      MessageStream<KV<Object, Object>> messageStream4 =
+          appDesc.getInputStream(input4Descriptor)
+              .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p4");
 
-        OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
+      OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
 
-        messageStream1
-            .join(messageStream2,
-                mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j1")
-            .sendTo(output1);
+      messageStream1
+          .join(messageStream2,
+              mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j1")
+          .sendTo(output1);
 
-        messageStream3
-            .join(messageStream4,
-                mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
-            .sendTo(output1);
+      messageStream3
+          .join(messageStream4,
+              mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
+          .sendTo(output1);
 
-        messageStream2
-            .join(messageStream3,
-                mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j3")
-            .sendTo(output1);
-      }, config);
+      messageStream2
+          .join(messageStream3,
+              mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j3")
+          .sendTo(output1);
+    }, config);
   }
 
   private StreamApplicationDescriptorImpl createStreamGraphWithInvalidStreamTableJoin() {
@@ -351,22 +351,22 @@
      *
      */
     return new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
-        MessageStream<KV<Object, Object>> messageStream2 = appDesc.getInputStream(input2Descriptor);
-        OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
+      MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
+      MessageStream<KV<Object, Object>> messageStream2 = appDesc.getInputStream(input2Descriptor);
+      OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
 
-        TableDescriptor tableDescriptor = new TestLocalTableDescriptor.MockLocalTableDescriptor(
-          "table-id", new KVSerde(new StringSerde(), new StringSerde()));
-        Table table = appDesc.getTable(tableDescriptor);
+      TableDescriptor tableDescriptor = new TestLocalTableDescriptor.MockLocalTableDescriptor(
+        "table-id", new KVSerde(new StringSerde(), new StringSerde()));
+      Table table = appDesc.getTable(tableDescriptor);
 
-        messageStream1.sendTo(table);
+      messageStream1.sendTo(table);
 
-        messageStream1
-            .join(table, mock(StreamTableJoinFunction.class))
-            .join(messageStream2,
-                mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
-            .sendTo(output1);
-      }, config);
+      messageStream1
+          .join(table, mock(StreamTableJoinFunction.class))
+          .join(messageStream2,
+              mock(JoinFunction.class), mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
+          .sendTo(output1);
+    }, config);
   }
 
   private StreamApplicationDescriptorImpl createStreamGraphWithStreamTableJoinWithSideInputs() {
@@ -379,20 +379,20 @@
      *
      */
     return new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<KV<Object, Object>> messageStream2 = appDesc.getInputStream(input2Descriptor);
-        OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
+      MessageStream<KV<Object, Object>> messageStream2 = appDesc.getInputStream(input2Descriptor);
+      OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
 
-        TableDescriptor tableDescriptor = new TestLocalTableDescriptor.MockLocalTableDescriptor(
-          "table-id", new KVSerde(new StringSerde(), new StringSerde()))
-            .withSideInputs(Arrays.asList("input1"))
-            .withSideInputsProcessor(mock(SideInputsProcessor.class));
-        Table table = appDesc.getTable(tableDescriptor);
+      TableDescriptor tableDescriptor = new TestLocalTableDescriptor.MockLocalTableDescriptor(
+        "table-id", new KVSerde(new StringSerde(), new StringSerde()))
+          .withSideInputs(Arrays.asList("input1"))
+          .withSideInputsProcessor(mock(SideInputsProcessor.class));
+      Table table = appDesc.getTable(tableDescriptor);
 
-        messageStream2
-            .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
-            .join(table, mock(StreamTableJoinFunction.class))
-            .sendTo(output1);
-      }, config);
+      messageStream2
+          .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
+          .join(table, mock(StreamTableJoinFunction.class))
+          .sendTo(output1);
+    }, config);
   }
 
   private StreamApplicationDescriptorImpl createStreamGraphWithInvalidStreamTableJoinWithSideInputs() {
@@ -407,19 +407,19 @@
      *
      */
     return new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
-        OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
+      MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
+      OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
 
-        TableDescriptor tableDescriptor = new TestLocalTableDescriptor.MockLocalTableDescriptor(
-          "table-id", new KVSerde(new StringSerde(), new StringSerde()))
-            .withSideInputs(Arrays.asList("input2"))
-            .withSideInputsProcessor(mock(SideInputsProcessor.class));
-        Table table = appDesc.getTable(tableDescriptor);
+      TableDescriptor tableDescriptor = new TestLocalTableDescriptor.MockLocalTableDescriptor(
+        "table-id", new KVSerde(new StringSerde(), new StringSerde()))
+          .withSideInputs(Arrays.asList("input2"))
+          .withSideInputsProcessor(mock(SideInputsProcessor.class));
+      Table table = appDesc.getTable(tableDescriptor);
 
-        messageStream1
-            .join(table, mock(StreamTableJoinFunction.class))
-            .sendTo(output1);
-      }, config);
+      messageStream1
+          .join(table, mock(StreamTableJoinFunction.class))
+          .sendTo(output1);
+    }, config);
   }
 
   private StreamApplicationDescriptorImpl createStreamGraphWithStreamTableJoinAndSendToSameTable() {
@@ -433,17 +433,17 @@
      * streams participating in stream-table joins. Please, refer to SAMZA SEP-16 for more details.
      */
     return new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
+      MessageStream<KV<Object, Object>> messageStream1 = appDesc.getInputStream(input1Descriptor);
 
-        TableDescriptor tableDescriptor = new TestLocalTableDescriptor.MockLocalTableDescriptor(
-          "table-id", new KVSerde(new StringSerde(), new StringSerde()));
-        Table table = appDesc.getTable(tableDescriptor);
+      TableDescriptor tableDescriptor = new TestLocalTableDescriptor.MockLocalTableDescriptor(
+        "table-id", new KVSerde(new StringSerde(), new StringSerde()));
+      Table table = appDesc.getTable(tableDescriptor);
 
-        messageStream1
-          .join(table, mock(StreamTableJoinFunction.class))
-          .sendTo(table);
+      messageStream1
+        .join(table, mock(StreamTableJoinFunction.class))
+        .sendTo(table);
 
-      }, config);
+    }, config);
   }
 
   @Before
@@ -535,8 +535,8 @@
     assertTrue(jobGraph.getOrCreateStreamEdge(output2Spec).getPartitionCount() == 16);
 
     jobGraph.getIntermediateStreamEdges().forEach(edge -> {
-        assertTrue(edge.getPartitionCount() == -1);
-      });
+      assertTrue(edge.getPartitionCount() == -1);
+    });
   }
 
   @Test
@@ -547,8 +547,8 @@
 
     // Partitions should be the same as input1
     jobGraph.getIntermediateStreams().forEach(edge -> {
-        assertEquals(64, edge.getPartitionCount());
-      });
+      assertEquals(64, edge.getPartitionCount());
+    });
   }
 
   @Test
@@ -569,8 +569,8 @@
 
     // Partitions should be the same as input1
     jobGraph.getIntermediateStreams().forEach(edge -> {
-        assertEquals(64, edge.getPartitionCount());
-      });
+      assertEquals(64, edge.getPartitionCount());
+    });
   }
 
   @Test
@@ -582,8 +582,8 @@
 
     // Partitions should be the same as input1
     jobGraph.getIntermediateStreams().forEach(edge -> {
-        assertEquals(64, edge.getPartitionCount()); // max of input1 and output1
-      });
+      assertEquals(64, edge.getPartitionCount()); // max of input1 and output1
+    });
   }
 
   @Test
@@ -595,8 +595,8 @@
 
     // Partitions should be the same as input3
     jobGraph.getIntermediateStreams().forEach(edge -> {
-        assertEquals(32, edge.getPartitionCount());
-      });
+      assertEquals(32, edge.getPartitionCount());
+    });
   }
 
   @Test
@@ -608,8 +608,8 @@
 
     // Partitions should be the same as input1
     jobGraph.getIntermediateStreams().forEach(edge -> {
-        assertEquals(64, edge.getPartitionCount());
-      });
+      assertEquals(64, edge.getPartitionCount());
+    });
   }
 
   @Test
@@ -633,8 +633,8 @@
 
     // Partitions should be the same as input1
     jobGraph.getIntermediateStreams().forEach(edge -> {
-        assertTrue(edge.getPartitionCount() == DEFAULT_PARTITIONS);
-      });
+      assertTrue(edge.getPartitionCount() == DEFAULT_PARTITIONS);
+    });
   }
 
   @Test
@@ -659,17 +659,17 @@
 
     ExecutionPlanner planner = new ExecutionPlanner(config, streamManager);
     StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<KV<Object, Object>> input1 = appDesc.getInputStream(input4Descriptor);
-        OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
-        input1.partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1").map(kv -> kv).sendTo(output1);
-      }, config);
+      MessageStream<KV<Object, Object>> input1 = appDesc.getInputStream(input4Descriptor);
+      OutputStream<KV<Object, Object>> output1 = appDesc.getOutputStream(output1Descriptor);
+      input1.partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1").map(kv -> kv).sendTo(output1);
+    }, config);
 
     JobGraph jobGraph = (JobGraph) planner.plan(graphSpec);
 
     // Partitions should be the same as input1
     jobGraph.getIntermediateStreams().forEach(edge -> {
-        assertEquals(partitionLimit, edge.getPartitionCount()); // max of input1 and output1
-      });
+      assertEquals(partitionLimit, edge.getPartitionCount()); // max of input1 and output1
+    });
   }
 
   @Test(expected = SamzaException.class)
@@ -836,10 +836,10 @@
         .filter(streamId -> inputDescriptors.containsKey(streamId)).collect(Collectors.toList()).isEmpty());
     Set<String> intermediateStreams = new HashSet<>(inputDescriptors.keySet());
     jobGraph.getInputStreams().forEach(edge -> {
-        if (intermediateStreams.contains(edge.getStreamSpec().getId())) {
-          intermediateStreams.remove(edge.getStreamSpec().getId());
-        }
-      });
+      if (intermediateStreams.contains(edge.getStreamSpec().getId())) {
+        intermediateStreams.remove(edge.getStreamSpec().getId());
+      }
+    });
     assertEquals(new HashSet<>(Arrays.asList(intermediateStream1, intermediateBroadcast)), intermediateStreams);
   }
 
diff --git a/samza-core/src/test/java/org/apache/samza/execution/TestJobGraphJsonGenerator.java b/samza-core/src/test/java/org/apache/samza/execution/TestJobGraphJsonGenerator.java
index eee8fd0..665b70a 100644
--- a/samza-core/src/test/java/org/apache/samza/execution/TestJobGraphJsonGenerator.java
+++ b/samza-core/src/test/java/org/apache/samza/execution/TestJobGraphJsonGenerator.java
@@ -31,6 +31,11 @@
 import org.apache.samza.config.Config;
 import org.apache.samza.config.JobConfig;
 import org.apache.samza.config.MapConfig;
+import org.apache.samza.operators.functions.StreamTableJoinFunction;
+import org.apache.samza.operators.spec.OperatorSpec;
+import org.apache.samza.operators.spec.OperatorSpecs;
+import org.apache.samza.operators.spec.SendToTableOperatorSpec;
+import org.apache.samza.operators.spec.StreamTableJoinOperatorSpec;
 import org.apache.samza.system.descriptors.GenericInputDescriptor;
 import org.apache.samza.system.descriptors.GenericOutputDescriptor;
 import org.apache.samza.system.descriptors.GenericSystemDescriptor;
@@ -174,43 +179,43 @@
     StreamManager streamManager = new StreamManager(systemAdmins);
 
     StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
-        KVSerde<Object, Object> kvSerde = new KVSerde<>(new NoOpSerde(), new NoOpSerde());
-        String mockSystemFactoryClass = "factory.class.name";
-        GenericSystemDescriptor system1 = new GenericSystemDescriptor("system1", mockSystemFactoryClass);
-        GenericSystemDescriptor system2 = new GenericSystemDescriptor("system2", mockSystemFactoryClass);
-        GenericInputDescriptor<KV<Object, Object>> input1Descriptor = system1.getInputDescriptor("input1", kvSerde);
-        GenericInputDescriptor<KV<Object, Object>> input2Descriptor = system2.getInputDescriptor("input2", kvSerde);
-        GenericInputDescriptor<KV<Object, Object>> input3Descriptor = system2.getInputDescriptor("input3", kvSerde);
-        GenericOutputDescriptor<KV<Object, Object>>  output1Descriptor = system1.getOutputDescriptor("output1", kvSerde);
-        GenericOutputDescriptor<KV<Object, Object>> output2Descriptor = system2.getOutputDescriptor("output2", kvSerde);
+      KVSerde<Object, Object> kvSerde = new KVSerde<>(new NoOpSerde(), new NoOpSerde());
+      String mockSystemFactoryClass = "factory.class.name";
+      GenericSystemDescriptor system1 = new GenericSystemDescriptor("system1", mockSystemFactoryClass);
+      GenericSystemDescriptor system2 = new GenericSystemDescriptor("system2", mockSystemFactoryClass);
+      GenericInputDescriptor<KV<Object, Object>> input1Descriptor = system1.getInputDescriptor("input1", kvSerde);
+      GenericInputDescriptor<KV<Object, Object>> input2Descriptor = system2.getInputDescriptor("input2", kvSerde);
+      GenericInputDescriptor<KV<Object, Object>> input3Descriptor = system2.getInputDescriptor("input3", kvSerde);
+      GenericOutputDescriptor<KV<Object, Object>>  output1Descriptor = system1.getOutputDescriptor("output1", kvSerde);
+      GenericOutputDescriptor<KV<Object, Object>> output2Descriptor = system2.getOutputDescriptor("output2", kvSerde);
 
-        MessageStream<KV<Object, Object>> messageStream1 =
-            appDesc.getInputStream(input1Descriptor)
-                .map(m -> m);
-        MessageStream<KV<Object, Object>> messageStream2 =
-            appDesc.getInputStream(input2Descriptor)
-                .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
-                .filter(m -> true);
-        MessageStream<KV<Object, Object>> messageStream3 =
-            appDesc.getInputStream(input3Descriptor)
-                .filter(m -> true)
-                .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p2")
-                .map(m -> m);
-        OutputStream<KV<Object, Object>> outputStream1 = appDesc.getOutputStream(output1Descriptor);
-        OutputStream<KV<Object, Object>> outputStream2 = appDesc.getOutputStream(output2Descriptor);
+      MessageStream<KV<Object, Object>> messageStream1 =
+          appDesc.getInputStream(input1Descriptor)
+              .map(m -> m);
+      MessageStream<KV<Object, Object>> messageStream2 =
+          appDesc.getInputStream(input2Descriptor)
+              .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p1")
+              .filter(m -> true);
+      MessageStream<KV<Object, Object>> messageStream3 =
+          appDesc.getInputStream(input3Descriptor)
+              .filter(m -> true)
+              .partitionBy(m -> m.key, m -> m.value, mock(KVSerde.class), "p2")
+              .map(m -> m);
+      OutputStream<KV<Object, Object>> outputStream1 = appDesc.getOutputStream(output1Descriptor);
+      OutputStream<KV<Object, Object>> outputStream2 = appDesc.getOutputStream(output2Descriptor);
 
-        messageStream1
-            .join(messageStream2,
-                (JoinFunction<Object, KV<Object, Object>, KV<Object, Object>, KV<Object, Object>>) mock(JoinFunction.class),
-                mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(2), "j1")
-            .sendTo(outputStream1);
-        messageStream2.sink((message, collector, coordinator) -> { });
-        messageStream3
-            .join(messageStream2,
-                (JoinFunction<Object, KV<Object, Object>, KV<Object, Object>, KV<Object, Object>>) mock(JoinFunction.class),
-                mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
-            .sendTo(outputStream2);
-      }, config);
+      messageStream1
+          .join(messageStream2,
+              (JoinFunction<Object, KV<Object, Object>, KV<Object, Object>, KV<Object, Object>>) mock(JoinFunction.class),
+              mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(2), "j1")
+          .sendTo(outputStream1);
+      messageStream2.sink((message, collector, coordinator) -> { });
+      messageStream3
+          .join(messageStream2,
+              (JoinFunction<Object, KV<Object, Object>, KV<Object, Object>, KV<Object, Object>>) mock(JoinFunction.class),
+              mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
+          .sendTo(outputStream2);
+    }, config);
 
     ExecutionPlanner planner = new ExecutionPlanner(config, streamManager);
     ExecutionPlan plan = planner.plan(graphSpec);
@@ -250,27 +255,24 @@
     StreamManager streamManager = new StreamManager(systemAdmins);
 
     StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
-        KVSerde<String, PageViewEvent> pvSerde = KVSerde.of(new StringSerde(), new JsonSerdeV2<>(PageViewEvent.class));
-        GenericSystemDescriptor isd = new GenericSystemDescriptor("hdfs", "mockSystemFactoryClass");
-        GenericInputDescriptor<KV<String, PageViewEvent>> pageView = isd.getInputDescriptor("PageView", pvSerde);
+      KVSerde<String, PageViewEvent> pvSerde = KVSerde.of(new StringSerde(), new JsonSerdeV2<>(PageViewEvent.class));
+      GenericSystemDescriptor isd = new GenericSystemDescriptor("hdfs", "mockSystemFactoryClass");
+      GenericInputDescriptor<KV<String, PageViewEvent>> pageView = isd.getInputDescriptor("PageView", pvSerde);
 
-        KVSerde<String, Long> pvcSerde = KVSerde.of(new StringSerde(), new LongSerde());
-        GenericSystemDescriptor osd = new GenericSystemDescriptor("kafka", "mockSystemFactoryClass");
-        GenericOutputDescriptor<KV<String, Long>> pageViewCount = osd.getOutputDescriptor("PageViewCount", pvcSerde);
+      KVSerde<String, Long> pvcSerde = KVSerde.of(new StringSerde(), new LongSerde());
+      GenericSystemDescriptor osd = new GenericSystemDescriptor("kafka", "mockSystemFactoryClass");
+      GenericOutputDescriptor<KV<String, Long>> pageViewCount = osd.getOutputDescriptor("PageViewCount", pvcSerde);
 
-        MessageStream<KV<String, PageViewEvent>> inputStream = appDesc.getInputStream(pageView);
-        OutputStream<KV<String, Long>> outputStream = appDesc.getOutputStream(pageViewCount);
-        inputStream
-            .partitionBy(kv -> kv.getValue().getCountry(), kv -> kv.getValue(), pvSerde, "keyed-by-country")
-            .window(Windows.keyedTumblingWindow(kv -> kv.getValue().getCountry(),
-                Duration.ofSeconds(10L),
-                () -> 0L,
-                (m, c) -> c + 1L,
-                new StringSerde(),
-                new LongSerde()), "count-by-country")
-            .map(pane -> new KV<>(pane.getKey().getKey(), pane.getMessage()))
-            .sendTo(outputStream);
-      }, config);
+      MessageStream<KV<String, PageViewEvent>> inputStream = appDesc.getInputStream(pageView);
+      OutputStream<KV<String, Long>> outputStream = appDesc.getOutputStream(pageViewCount);
+      inputStream
+          .partitionBy(kv -> kv.getValue().getCountry(), kv -> kv.getValue(), pvSerde, "keyed-by-country")
+          .window(Windows.keyedTumblingWindow(kv -> kv.getValue().getCountry(),
+              Duration.ofSeconds(10L), () -> 0L, (m, c) -> c + 1L, new StringSerde(), new LongSerde()),
+              "count-by-country")
+          .map(pane -> new KV<>(pane.getKey().getKey(), pane.getMessage()))
+          .sendTo(outputStream);
+    }, config);
 
     ExecutionPlanner planner = new ExecutionPlanner(config, streamManager);
     ExecutionPlan plan = planner.plan(graphSpec);
@@ -368,4 +370,25 @@
       return "";
     }
   }
+
+  @Test
+  public void testOperatorToMapForTable() {
+    JobGraphJsonGenerator jsonGenerator = new JobGraphJsonGenerator();
+    Map<String, Object> map;
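+    // SendToTable: the generated JSON map for this operator should expose the target tableId along with the op code and op id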
+    SendToTableOperatorSpec<Object, Object> sendToTableOperatorSpec =
+        OperatorSpecs.createSendToTableOperatorSpec("test-sent-to-table", "test-sent-to");
+    map = jsonGenerator.operatorToMap(sendToTableOperatorSpec);
+    assertTrue(map.containsKey("tableId"));
+    assertEquals("test-sent-to-table", map.get("tableId"));
+    assertEquals(OperatorSpec.OpCode.SEND_TO.name(), map.get("opCode"));
+    assertEquals("test-sent-to", map.get("opId"));
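+    // StreamTableJoin: the joined table's id should likewise appear in the generated map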
+    StreamTableJoinOperatorSpec<String, String, String, String> streamTableJoinOperatorSpec =
+        OperatorSpecs.createStreamTableJoinOperatorSpec("test-join-table", mock(StreamTableJoinFunction.class), "test-join");
+    map = jsonGenerator.operatorToMap(streamTableJoinOperatorSpec);
+    assertTrue(map.containsKey("tableId"));
+    assertEquals("test-join-table", map.get("tableId"));
+    assertEquals(OperatorSpec.OpCode.JOIN.name(), map.get("opCode"));
+    assertEquals("test-join", map.get("opId"));
+  }
+
 }
diff --git a/samza-core/src/test/java/org/apache/samza/execution/TestJobNodeConfigurationGenerator.java b/samza-core/src/test/java/org/apache/samza/execution/TestJobNodeConfigurationGenerator.java
index 2d80e79..93b712f 100644
--- a/samza-core/src/test/java/org/apache/samza/execution/TestJobNodeConfigurationGenerator.java
+++ b/samza-core/src/test/java/org/apache/samza/execution/TestJobNodeConfigurationGenerator.java
@@ -289,8 +289,8 @@
     SerializableSerde<Serde> serializableSerde = new SerializableSerde<>();
     assertEquals(numSerdes, serializers.size());
     return serializers.entrySet().stream().collect(Collectors.toMap(
-        e -> e.getKey().replace(SerializerConfig.SERIALIZED_INSTANCE_SUFFIX, ""),
-        e -> serializableSerde.fromBytes(Base64.getDecoder().decode(e.getValue().getBytes()))
+      e -> e.getKey().replace(SerializerConfig.SERIALIZED_INSTANCE_SUFFIX, ""),
+      e -> serializableSerde.fromBytes(Base64.getDecoder().decode(e.getValue().getBytes()))
     ));
   }
 
diff --git a/samza-core/src/test/java/org/apache/samza/execution/TestJobPlanner.java b/samza-core/src/test/java/org/apache/samza/execution/TestJobPlanner.java
index 49bfd7f..68e4919 100644
--- a/samza-core/src/test/java/org/apache/samza/execution/TestJobPlanner.java
+++ b/samza-core/src/test/java/org/apache/samza/execution/TestJobPlanner.java
@@ -20,10 +20,15 @@
 package org.apache.samza.execution;
 
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
+import org.apache.samza.application.LegacyTaskApplication;
+import org.apache.samza.application.descriptors.ApplicationDescriptorImpl;
+import org.apache.samza.config.JobConfig;
 import org.apache.samza.config.MapConfig;
 import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 public class TestJobPlanner {
 
@@ -59,4 +64,26 @@
     Assert.assertEquals(generatedConfig.get("job.id"), "should-exist-id");
   }
 
+  @Test
+  public void testRunIdIsConfiguredForAllTypesOfApps() {
+    Map<String, String> testConfig = new HashMap<>();
+    testConfig.put("app.id", "should-exist-id");
+    testConfig.put("app.name", "should-exist-name");
+
+    ApplicationDescriptorImpl applicationDescriptor = Mockito.mock(ApplicationDescriptorImpl.class);
+
+    Mockito.when(applicationDescriptor.getConfig()).thenReturn(new MapConfig(testConfig));
+    Mockito.when(applicationDescriptor.getAppClass()).thenReturn(LegacyTaskApplication.class);
+
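+    // Anonymous JobPlanner with a no-op prepareJobs; only getExecutionPlan is exercised by this test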
+    JobPlanner jobPlanner = new JobPlanner(applicationDescriptor) {
+      @Override
+      public List<JobConfig> prepareJobs() {
+        return null;
+      }
+    };
+
+    ExecutionPlan plan = jobPlanner.getExecutionPlan("custom-run-id");
+    Assert.assertEquals("custom-run-id", plan.getApplicationConfig().getRunId());
+  }
+
 }
diff --git a/samza-core/src/test/java/org/apache/samza/metrics/TestMetricsSnapshotReporter.java b/samza-core/src/test/java/org/apache/samza/metrics/TestMetricsSnapshotReporter.java
index 1ddf70f..1f69a7e 100644
--- a/samza-core/src/test/java/org/apache/samza/metrics/TestMetricsSnapshotReporter.java
+++ b/samza-core/src/test/java/org/apache/samza/metrics/TestMetricsSnapshotReporter.java
@@ -19,23 +19,54 @@
 
 package org.apache.samza.metrics;
 
+import java.util.List;
+import java.util.Map;
+import org.apache.samza.metrics.reporter.MetricsSnapshot;
 import org.apache.samza.metrics.reporter.MetricsSnapshotReporter;
 import org.apache.samza.serializers.MetricsSnapshotSerdeV2;
+import org.apache.samza.serializers.Serializer;
+import org.apache.samza.system.OutgoingMessageEnvelope;
+import org.apache.samza.system.SystemProducer;
 import org.apache.samza.system.SystemStream;
-import org.apache.samza.system.inmemory.InMemorySystemProducer;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
+import org.mockito.ArgumentCaptor;
 import scala.Some;
 import scala.runtime.AbstractFunction0;
 
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.eq;
+
 
 public class TestMetricsSnapshotReporter {
+
   private MetricsSnapshotReporter metricsSnapshotReporter;
   private static final String BLACKLIST_ALL = ".*";
   private static final String BLACKLIST_NONE = "";
   private static final String BLACKLIST_GROUPS = ".*(SystemConsumersMetrics|CachedStoreMetrics).*";
   private static final String BLACKLIST_ALL_BUT_TWO_GROUPS = "^(?!.*?(?:SystemConsumersMetrics|CachedStoreMetrics)).*$";
 
+  private static final SystemStream SYSTEM_STREAM = new SystemStream("test system", "test stream");
+  private static final String JOB_NAME = "test job";
+  private static final String JOB_ID = "test jobID";
+  private static final String CONTAINER_NAME = "samza-container-0";
+  private static final String TASK_VERSION = "test version";
+  private static final String SAMZA_VERSION = "test samza version";
+  private static final String HOSTNAME = "test host";
+  private static final int REPORTING_INTERVAL = 60000;
+
+  private Serializer<MetricsSnapshot> serializer;
+  private SystemProducer producer;
+
+  @Before
+  public void setup() {
+    producer = mock(SystemProducer.class);
+    serializer = new MetricsSnapshotSerdeV2();
+  }
+
   @Test
   public void testBlacklistAll() {
     this.metricsSnapshotReporter = getMetricsSnapshotReporter(BLACKLIST_ALL);
@@ -101,15 +132,61 @@
             "poll-count"));
   }
 
+  @Test
+  public void testMetricsEmission() {
+    // setup
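+    // serializer is left null so the emitted message can be captured and cast back to MetricsSnapshot below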
+    serializer = null;
+    String source = "testSource";
+    String group = "someGroup";
+    String metricName = "someName";
+    MetricsRegistryMap registry = new MetricsRegistryMap();
+
+    metricsSnapshotReporter = getMetricsSnapshotReporter(TestMetricsSnapshotReporter.BLACKLIST_NONE);
+    registry.newGauge(group, metricName, 42);
+    metricsSnapshotReporter.register(source, registry);
+
+    ArgumentCaptor<OutgoingMessageEnvelope> outgoingMessageEnvelopeArgumentCaptor =
+        ArgumentCaptor.forClass(OutgoingMessageEnvelope.class);
+
+    // run
+    metricsSnapshotReporter.run();
+
+    // assert
+    verify(producer, times(1)).send(eq(source), outgoingMessageEnvelopeArgumentCaptor.capture());
+    verify(producer, times(1)).flush(eq(source));
+
+    List<OutgoingMessageEnvelope> envelopes = outgoingMessageEnvelopeArgumentCaptor.getAllValues();
+
+    Assert.assertEquals(1, envelopes.size());
+
+    MetricsSnapshot metricsSnapshot = (MetricsSnapshot) envelopes.get(0).getMessage();
+
+    Assert.assertEquals(JOB_NAME, metricsSnapshot.getHeader().getJobName());
+    Assert.assertEquals(JOB_ID, metricsSnapshot.getHeader().getJobId());
+    Assert.assertEquals(CONTAINER_NAME, metricsSnapshot.getHeader().getContainerName());
+    Assert.assertEquals(source, metricsSnapshot.getHeader().getSource());
+    Assert.assertEquals(SAMZA_VERSION, metricsSnapshot.getHeader().getSamzaVersion());
+    Assert.assertEquals(TASK_VERSION, metricsSnapshot.getHeader().getVersion());
+    Assert.assertEquals(HOSTNAME, metricsSnapshot.getHeader().getHost());
+
+    Map<String, Map<String, Object>> metricMap = metricsSnapshot.getMetrics().getAsMap();
+    Assert.assertEquals(1, metricMap.size());
+    Assert.assertTrue(metricMap.containsKey(group));
+    Assert.assertTrue(metricMap.get(group).containsKey(metricName));
+    Assert.assertEquals(42, metricMap.get(group).get(metricName));
+  }
+
   private MetricsSnapshotReporter getMetricsSnapshotReporter(String blacklist) {
-    return new MetricsSnapshotReporter(new InMemorySystemProducer("test system", null),
-        new SystemStream("test system", "test stream"), 60000, "test job", "test jobID", "samza-container-0",
-        "test version", "test samza version", "test host", new MetricsSnapshotSerdeV2(), new Some<>(blacklist),
-        new AbstractFunction0<Object>() {
-          @Override
-          public Object apply() {
-            return System.currentTimeMillis();
-          }
-        });
+    return new MetricsSnapshotReporter(producer, SYSTEM_STREAM, REPORTING_INTERVAL, JOB_NAME, JOB_ID, CONTAINER_NAME,
+        TASK_VERSION, SAMZA_VERSION, HOSTNAME, serializer, new Some<>(blacklist), getClock());
+  }
+
+  private AbstractFunction0<Object> getClock() {
+    return new AbstractFunction0<Object>() {
+      @Override
+      public Object apply() {
+        return System.currentTimeMillis();
+      }
+    };
   }
 }
diff --git a/samza-core/src/test/java/org/apache/samza/operators/TestJoinOperator.java b/samza-core/src/test/java/org/apache/samza/operators/TestJoinOperator.java
index b310e6f..907dd7d 100644
--- a/samza-core/src/test/java/org/apache/samza/operators/TestJoinOperator.java
+++ b/samza-core/src/test/java/org/apache/samza/operators/TestJoinOperator.java
@@ -98,15 +98,15 @@
     Config config = new MapConfig(mapConfig);
 
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        IntegerSerde integerSerde = new IntegerSerde();
-        KVSerde<Integer, Integer> kvSerde = KVSerde.of(integerSerde, integerSerde);
-        GenericSystemDescriptor sd = new GenericSystemDescriptor("insystem", "mockFactoryClassName");
-        GenericInputDescriptor<KV<Integer, Integer>> inputDescriptor = sd.getInputDescriptor("inStream", kvSerde);
+      IntegerSerde integerSerde = new IntegerSerde();
+      KVSerde<Integer, Integer> kvSerde = KVSerde.of(integerSerde, integerSerde);
+      GenericSystemDescriptor sd = new GenericSystemDescriptor("insystem", "mockFactoryClassName");
+      GenericInputDescriptor<KV<Integer, Integer>> inputDescriptor = sd.getInputDescriptor("inStream", kvSerde);
 
-        MessageStream<KV<Integer, Integer>> inStream = appDesc.getInputStream(inputDescriptor);
+      MessageStream<KV<Integer, Integer>> inStream = appDesc.getInputStream(inputDescriptor);
 
-        inStream.join(inStream, new TestJoinFunction(), integerSerde, kvSerde, kvSerde, JOIN_TTL, "join");
-      }, config);
+      inStream.join(inStream, new TestJoinFunction(), integerSerde, kvSerde, kvSerde, JOIN_TTL, "join");
+    }, config);
 
     createStreamOperatorTask(new SystemClock(), streamAppDesc); // should throw an exception
   }
@@ -336,22 +336,22 @@
     Config config = new MapConfig(mapConfig);
 
     return new StreamApplicationDescriptorImpl(appDesc -> {
-        IntegerSerde integerSerde = new IntegerSerde();
-        KVSerde<Integer, Integer> kvSerde = KVSerde.of(integerSerde, integerSerde);
-        GenericSystemDescriptor sd = new GenericSystemDescriptor("insystem", "mockFactoryClassName");
-        GenericInputDescriptor<KV<Integer, Integer>> inputDescriptor1 = sd.getInputDescriptor("inStream", kvSerde);
-        GenericInputDescriptor<KV<Integer, Integer>> inputDescriptor2 = sd.getInputDescriptor("inStream2", kvSerde);
+      IntegerSerde integerSerde = new IntegerSerde();
+      KVSerde<Integer, Integer> kvSerde = KVSerde.of(integerSerde, integerSerde);
+      GenericSystemDescriptor sd = new GenericSystemDescriptor("insystem", "mockFactoryClassName");
+      GenericInputDescriptor<KV<Integer, Integer>> inputDescriptor1 = sd.getInputDescriptor("inStream", kvSerde);
+      GenericInputDescriptor<KV<Integer, Integer>> inputDescriptor2 = sd.getInputDescriptor("inStream2", kvSerde);
 
-        MessageStream<KV<Integer, Integer>> inStream = appDesc.getInputStream(inputDescriptor1);
-        MessageStream<KV<Integer, Integer>> inStream2 = appDesc.getInputStream(inputDescriptor2);
+      MessageStream<KV<Integer, Integer>> inStream = appDesc.getInputStream(inputDescriptor1);
+      MessageStream<KV<Integer, Integer>> inStream2 = appDesc.getInputStream(inputDescriptor2);
 
-        inStream
-            .join(inStream2, joinFn, integerSerde, kvSerde, kvSerde, JOIN_TTL, "j1")
-            .sink((message, messageCollector, taskCoordinator) -> {
-                SystemStream outputSystemStream = new SystemStream("outputSystem", "outputStream");
-                messageCollector.send(new OutgoingMessageEnvelope(outputSystemStream, message));
-              });
-      }, config);
+      inStream
+          .join(inStream2, joinFn, integerSerde, kvSerde, kvSerde, JOIN_TTL, "j1")
+          .sink((message, messageCollector, taskCoordinator) -> {
+            SystemStream outputSystemStream = new SystemStream("outputSystem", "outputStream");
+            messageCollector.send(new OutgoingMessageEnvelope(outputSystemStream, message));
+          });
+    }, config);
   }
 
   private static class TestJoinFunction
diff --git a/samza-core/src/test/java/org/apache/samza/operators/impl/TestControlMessageSender.java b/samza-core/src/test/java/org/apache/samza/operators/impl/TestControlMessageSender.java
index 9ff9a4f..5e23653 100644
--- a/samza-core/src/test/java/org/apache/samza/operators/impl/TestControlMessageSender.java
+++ b/samza-core/src/test/java/org/apache/samza/operators/impl/TestControlMessageSender.java
@@ -60,11 +60,11 @@
     Set<Integer> partitions = new HashSet<>();
     MessageCollector collector = mock(MessageCollector.class);
     doAnswer(invocation -> {
-        OutgoingMessageEnvelope envelope = (OutgoingMessageEnvelope) invocation.getArguments()[0];
-        partitions.add((Integer) envelope.getPartitionKey());
-        assertEquals(envelope.getSystemStream(), systemStream);
-        return null;
-      }).when(collector).send(any());
+      OutgoingMessageEnvelope envelope = (OutgoingMessageEnvelope) invocation.getArguments()[0];
+      partitions.add((Integer) envelope.getPartitionKey());
+      assertEquals(envelope.getSystemStream(), systemStream);
+      return null;
+    }).when(collector).send(any());
 
     ControlMessageSender sender = new ControlMessageSender(metadataCache);
     WatermarkMessage watermark = new WatermarkMessage(System.currentTimeMillis(), "task 0");
@@ -88,11 +88,11 @@
     Set<Integer> partitions = new HashSet<>();
     MessageCollector collector = mock(MessageCollector.class);
     doAnswer(invocation -> {
-        OutgoingMessageEnvelope envelope = (OutgoingMessageEnvelope) invocation.getArguments()[0];
-        partitions.add((Integer) envelope.getPartitionKey());
-        assertEquals(envelope.getSystemStream(), systemStream);
-        return null;
-      }).when(collector).send(any());
+      OutgoingMessageEnvelope envelope = (OutgoingMessageEnvelope) invocation.getArguments()[0];
+      partitions.add((Integer) envelope.getPartitionKey());
+      assertEquals(envelope.getSystemStream(), systemStream);
+      return null;
+    }).when(collector).send(any());
 
     ControlMessageSender sender = new ControlMessageSender(metadataCache);
     WatermarkMessage watermark = new WatermarkMessage(System.currentTimeMillis(), "task 0");
diff --git a/samza-core/src/test/java/org/apache/samza/operators/impl/TestOperatorImplGraph.java b/samza-core/src/test/java/org/apache/samza/operators/impl/TestOperatorImplGraph.java
index 579c028..ea57479 100644
--- a/samza-core/src/test/java/org/apache/samza/operators/impl/TestOperatorImplGraph.java
+++ b/samza-core/src/test/java/org/apache/samza/operators/impl/TestOperatorImplGraph.java
@@ -132,17 +132,17 @@
     when(this.context.getJobContext().getConfig()).thenReturn(config);
 
     StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
-        GenericSystemDescriptor sd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
-        GenericInputDescriptor inputDescriptor = sd.getInputDescriptor(inputStreamId, mock(Serde.class));
-        GenericOutputDescriptor outputDescriptor = sd.getOutputDescriptor(outputStreamId, mock(Serde.class));
-        MessageStream<Object> inputStream = appDesc.getInputStream(inputDescriptor);
-        OutputStream<Object> outputStream = appDesc.getOutputStream(outputDescriptor);
+      GenericSystemDescriptor sd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
+      GenericInputDescriptor inputDescriptor = sd.getInputDescriptor(inputStreamId, mock(Serde.class));
+      GenericOutputDescriptor outputDescriptor = sd.getOutputDescriptor(outputStreamId, mock(Serde.class));
+      MessageStream<Object> inputStream = appDesc.getInputStream(inputDescriptor);
+      OutputStream<Object> outputStream = appDesc.getOutputStream(outputDescriptor);
 
-        inputStream
-            .filter(mock(FilterFunction.class))
-            .map(mock(MapFunction.class))
-            .sendTo(outputStream);
-      }, config);
+      inputStream
+          .filter(mock(FilterFunction.class))
+          .map(mock(MapFunction.class))
+          .sendTo(outputStream);
+    }, config);
 
     OperatorImplGraph opImplGraph =
         new OperatorImplGraph(graphSpec.getOperatorSpecGraph(), this.context, mock(Clock.class));
@@ -184,19 +184,19 @@
     when(this.context.getJobContext().getConfig()).thenReturn(config);
 
     StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
-        GenericSystemDescriptor isd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
-        GenericSystemDescriptor osd = new GenericSystemDescriptor(outputSystem, "mockFactoryClass");
-        GenericInputDescriptor inputDescriptor = isd.getInputDescriptor(inputStreamId, mock(Serde.class));
-        GenericOutputDescriptor outputDescriptor = osd.getOutputDescriptor(outputStreamId,
-            KVSerde.of(mock(IntegerSerde.class), mock(StringSerde.class)));
-        MessageStream<Object> inputStream = appDesc.getInputStream(inputDescriptor);
-        OutputStream<KV<Integer, String>> outputStream = appDesc.getOutputStream(outputDescriptor);
+      GenericSystemDescriptor isd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
+      GenericSystemDescriptor osd = new GenericSystemDescriptor(outputSystem, "mockFactoryClass");
+      GenericInputDescriptor inputDescriptor = isd.getInputDescriptor(inputStreamId, mock(Serde.class));
+      GenericOutputDescriptor outputDescriptor = osd.getOutputDescriptor(outputStreamId,
+          KVSerde.of(mock(IntegerSerde.class), mock(StringSerde.class)));
+      MessageStream<Object> inputStream = appDesc.getInputStream(inputDescriptor);
+      OutputStream<KV<Integer, String>> outputStream = appDesc.getOutputStream(outputDescriptor);
 
-        inputStream
-            .partitionBy(Object::hashCode, Object::toString,
-                KVSerde.of(mock(IntegerSerde.class), mock(StringSerde.class)), "p1")
-            .sendTo(outputStream);
-      }, config);
+      inputStream
+          .partitionBy(Object::hashCode, Object::toString,
+              KVSerde.of(mock(IntegerSerde.class), mock(StringSerde.class)), "p1")
+          .sendTo(outputStream);
+    }, config);
 
     JobModel jobModel = mock(JobModel.class);
     ContainerModel containerModel = mock(ContainerModel.class);
@@ -236,12 +236,12 @@
     Config config = new MapConfig(configMap);
     when(this.context.getJobContext().getConfig()).thenReturn(config);
     StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
-        GenericSystemDescriptor sd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
-        GenericInputDescriptor inputDescriptor = sd.getInputDescriptor(inputStreamId, mock(Serde.class));
-        MessageStream<Object> inputStream = appDesc.getInputStream(inputDescriptor);
-        inputStream.filter(mock(FilterFunction.class));
-        inputStream.map(mock(MapFunction.class));
-      }, config);
+      GenericSystemDescriptor sd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
+      GenericInputDescriptor inputDescriptor = sd.getInputDescriptor(inputStreamId, mock(Serde.class));
+      MessageStream<Object> inputStream = appDesc.getInputStream(inputDescriptor);
+      inputStream.filter(mock(FilterFunction.class));
+      inputStream.map(mock(MapFunction.class));
+    }, config);
 
     OperatorImplGraph opImplGraph =
         new OperatorImplGraph(graphSpec.getOperatorSpecGraph(), this.context, mock(Clock.class));
@@ -259,14 +259,14 @@
     String inputStreamId = "input";
     String inputSystem = "input-system";
     StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
-        GenericSystemDescriptor sd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
-        GenericInputDescriptor inputDescriptor = sd.getInputDescriptor(inputStreamId, mock(Serde.class));
-        MessageStream<Object> inputStream = appDesc.getInputStream(inputDescriptor);
-        MessageStream<Object> stream1 = inputStream.filter(mock(FilterFunction.class));
-        MessageStream<Object> stream2 = inputStream.map(mock(MapFunction.class));
-        stream1.merge(Collections.singleton(stream2))
-            .map(new TestMapFunction<Object, Object>("test-map-1", (Function & Serializable) m -> m));
-      }, getConfig());
+      GenericSystemDescriptor sd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
+      GenericInputDescriptor inputDescriptor = sd.getInputDescriptor(inputStreamId, mock(Serde.class));
+      MessageStream<Object> inputStream = appDesc.getInputStream(inputDescriptor);
+      MessageStream<Object> stream1 = inputStream.filter(mock(FilterFunction.class));
+      MessageStream<Object> stream2 = inputStream.map(mock(MapFunction.class));
+      stream1.merge(Collections.singleton(stream2))
+          .map(new TestMapFunction<Object, Object>("test-map-1", (Function & Serializable) m -> m));
+    }, getConfig());
 
     TaskName mockTaskName = mock(TaskName.class);
     TaskModel taskModel = mock(TaskModel.class);
@@ -277,7 +277,7 @@
         new OperatorImplGraph(graphSpec.getOperatorSpecGraph(), this.context, mock(Clock.class));
 
     Set<OperatorImpl> opSet = opImplGraph.getAllInputOperators().stream().collect(HashSet::new,
-        (s, op) -> addOperatorRecursively(s, op), HashSet::addAll);
+      (s, op) -> addOperatorRecursively(s, op), HashSet::addAll);
     Object[] mergeOps = opSet.stream().filter(op -> op.getOperatorSpec().getOpCode() == OpCode.MERGE).toArray();
     assertEquals(1, mergeOps.length);
     assertEquals(1, ((OperatorImpl) mergeOps[0]).registeredOperators.size());
@@ -309,15 +309,15 @@
         (BiFunction & Serializable) (m1, m2) -> KV.of(m1, m2), keyFn, keyFn);
 
     StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
-        GenericSystemDescriptor sd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
-        GenericInputDescriptor inputDescriptor1 = sd.getInputDescriptor(inputStreamId1, mock(Serde.class));
-        GenericInputDescriptor inputDescriptor2 = sd.getInputDescriptor(inputStreamId2, mock(Serde.class));
-        MessageStream<Object> inputStream1 = appDesc.getInputStream(inputDescriptor1);
-        MessageStream<Object> inputStream2 = appDesc.getInputStream(inputDescriptor2);
+      GenericSystemDescriptor sd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
+      GenericInputDescriptor inputDescriptor1 = sd.getInputDescriptor(inputStreamId1, mock(Serde.class));
+      GenericInputDescriptor inputDescriptor2 = sd.getInputDescriptor(inputStreamId2, mock(Serde.class));
+      MessageStream<Object> inputStream1 = appDesc.getInputStream(inputDescriptor1);
+      MessageStream<Object> inputStream2 = appDesc.getInputStream(inputDescriptor2);
 
-        inputStream1.join(inputStream2, testJoinFunction,
-            mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j1");
-      }, config);
+      inputStream1.join(inputStream2, testJoinFunction,
+          mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j1");
+    }, config);
 
     TaskName mockTaskName = mock(TaskName.class);
     TaskModel taskModel = mock(TaskModel.class);
@@ -377,19 +377,19 @@
     when(this.context.getTaskContext().getTaskModel()).thenReturn(taskModel);
 
     StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
-        GenericSystemDescriptor sd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
-        GenericInputDescriptor inputDescriptor1 = sd.getInputDescriptor(inputStreamId1, mock(Serde.class));
-        GenericInputDescriptor inputDescriptor2 = sd.getInputDescriptor(inputStreamId2, mock(Serde.class));
-        MessageStream<Object> inputStream1 = appDesc.getInputStream(inputDescriptor1);
-        MessageStream<Object> inputStream2 = appDesc.getInputStream(inputDescriptor2);
+      GenericSystemDescriptor sd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
+      GenericInputDescriptor inputDescriptor1 = sd.getInputDescriptor(inputStreamId1, mock(Serde.class));
+      GenericInputDescriptor inputDescriptor2 = sd.getInputDescriptor(inputStreamId2, mock(Serde.class));
+      MessageStream<Object> inputStream1 = appDesc.getInputStream(inputDescriptor1);
+      MessageStream<Object> inputStream2 = appDesc.getInputStream(inputDescriptor2);
 
-        Function mapFn = (Function & Serializable) m -> m;
-        inputStream1.map(new TestMapFunction<Object, Object>("1", mapFn))
-            .map(new TestMapFunction<Object, Object>("2", mapFn));
+      Function mapFn = (Function & Serializable) m -> m;
+      inputStream1.map(new TestMapFunction<Object, Object>("1", mapFn))
+          .map(new TestMapFunction<Object, Object>("2", mapFn));
 
-        inputStream2.map(new TestMapFunction<Object, Object>("3", mapFn))
-            .map(new TestMapFunction<Object, Object>("4", mapFn));
-      }, getConfig());
+      inputStream2.map(new TestMapFunction<Object, Object>("3", mapFn))
+          .map(new TestMapFunction<Object, Object>("4", mapFn));
+    }, getConfig());
 
     OperatorImplGraph opImplGraph = new OperatorImplGraph(graphSpec.getOperatorSpecGraph(), this.context, SystemClock.instance());
 
@@ -475,33 +475,33 @@
     when(this.context.getJobContext().getConfig()).thenReturn(config);
 
     StreamApplicationDescriptorImpl graphSpec = new StreamApplicationDescriptorImpl(appDesc -> {
-        GenericSystemDescriptor isd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
-        GenericInputDescriptor inputDescriptor1 = isd.getInputDescriptor(inputStreamId1, mock(Serde.class));
-        GenericInputDescriptor inputDescriptor2 = isd.getInputDescriptor(inputStreamId2, mock(Serde.class));
-        GenericInputDescriptor inputDescriptor3 = isd.getInputDescriptor(inputStreamId3, mock(Serde.class));
-        GenericSystemDescriptor osd = new GenericSystemDescriptor(outputSystem, "mockFactoryClass");
-        GenericOutputDescriptor outputDescriptor1 = osd.getOutputDescriptor(outputStreamId1, mock(Serde.class));
-        GenericOutputDescriptor outputDescriptor2 = osd.getOutputDescriptor(outputStreamId2, mock(Serde.class));
-        MessageStream messageStream1 = appDesc.getInputStream(inputDescriptor1).map(m -> m);
-        MessageStream messageStream2 = appDesc.getInputStream(inputDescriptor2).filter(m -> true);
-        MessageStream messageStream3 =
-            appDesc.getInputStream(inputDescriptor3)
-                .filter(m -> true)
-                .partitionBy(m -> "m", m -> m, mock(KVSerde.class),  "p1")
-                .map(m -> m);
-        OutputStream<Object> outputStream1 = appDesc.getOutputStream(outputDescriptor1);
-        OutputStream<Object> outputStream2 = appDesc.getOutputStream(outputDescriptor2);
+      GenericSystemDescriptor isd = new GenericSystemDescriptor(inputSystem, "mockFactoryClass");
+      GenericInputDescriptor inputDescriptor1 = isd.getInputDescriptor(inputStreamId1, mock(Serde.class));
+      GenericInputDescriptor inputDescriptor2 = isd.getInputDescriptor(inputStreamId2, mock(Serde.class));
+      GenericInputDescriptor inputDescriptor3 = isd.getInputDescriptor(inputStreamId3, mock(Serde.class));
+      GenericSystemDescriptor osd = new GenericSystemDescriptor(outputSystem, "mockFactoryClass");
+      GenericOutputDescriptor outputDescriptor1 = osd.getOutputDescriptor(outputStreamId1, mock(Serde.class));
+      GenericOutputDescriptor outputDescriptor2 = osd.getOutputDescriptor(outputStreamId2, mock(Serde.class));
+      MessageStream messageStream1 = appDesc.getInputStream(inputDescriptor1).map(m -> m);
+      MessageStream messageStream2 = appDesc.getInputStream(inputDescriptor2).filter(m -> true);
+      MessageStream messageStream3 =
+          appDesc.getInputStream(inputDescriptor3)
+              .filter(m -> true)
+              .partitionBy(m -> "m", m -> m, mock(KVSerde.class),  "p1")
+              .map(m -> m);
+      OutputStream<Object> outputStream1 = appDesc.getOutputStream(outputDescriptor1);
+      OutputStream<Object> outputStream2 = appDesc.getOutputStream(outputDescriptor2);
 
-        messageStream1
-            .join(messageStream2, mock(JoinFunction.class),
-                mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(2), "j1")
-            .partitionBy(m -> "m", m -> m, mock(KVSerde.class), "p2")
-            .sendTo(outputStream1);
-        messageStream3
-            .join(messageStream2, mock(JoinFunction.class),
-                mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
-            .sendTo(outputStream2);
-      }, config);
+      messageStream1
+          .join(messageStream2, mock(JoinFunction.class),
+              mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(2), "j1")
+          .partitionBy(m -> "m", m -> m, mock(KVSerde.class), "p2")
+          .sendTo(outputStream1);
+      messageStream3
+          .join(messageStream2, mock(JoinFunction.class),
+              mock(Serde.class), mock(Serde.class), mock(Serde.class), Duration.ofHours(1), "j2")
+          .sendTo(outputStream2);
+    }, config);
 
     Multimap<SystemStream, SystemStream> outputToInput =
         OperatorImplGraph.getIntermediateToInputStreamsMap(graphSpec.getOperatorSpecGraph(), new StreamConfig(config));
diff --git a/samza-core/src/test/java/org/apache/samza/operators/impl/TestWindowOperator.java b/samza-core/src/test/java/org/apache/samza/operators/impl/TestWindowOperator.java
index 594cd4a..76b79a7 100644
--- a/samza-core/src/test/java/org/apache/samza/operators/impl/TestWindowOperator.java
+++ b/samza-core/src/test/java/org/apache/samza/operators/impl/TestWindowOperator.java
@@ -114,7 +114,7 @@
     StreamOperatorTask task = new StreamOperatorTask(sgb, testClock);
     task.init(this.context);
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
     integers.forEach(n -> task.processAsync(new IntegerEnvelope(n), messageCollector, taskCoordinator, taskCallback));
     testClock.advanceTime(Duration.ofSeconds(1));
 
@@ -148,7 +148,7 @@
     task.init(this.context);
 
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
     Assert.assertEquals(windowPanes.size(), 0);
 
     integers.forEach(n -> task.processAsync(new IntegerEnvelope(n), messageCollector, taskCoordinator, taskCallback));
@@ -197,7 +197,7 @@
     task.init(this.context);
 
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
     integers.forEach(n -> task.processAsync(new IntegerEnvelope(n), messageCollector, taskCoordinator, taskCallback));
     testClock.advanceTime(Duration.ofSeconds(1));
     task.window(messageCollector, taskCoordinator);
@@ -225,7 +225,7 @@
     StreamOperatorTask task = new StreamOperatorTask(sgb, testClock);
     task.init(this.context);
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
     testClock.advanceTime(Duration.ofSeconds(1));
@@ -271,7 +271,7 @@
     List<WindowPane<Integer, Collection<IntegerEnvelope>>> windowPanes = new ArrayList<>();
 
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
     task.init(this.context);
 
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
@@ -304,7 +304,7 @@
 
     List<WindowPane<Integer, Collection<IntegerEnvelope>>> windowPanes = new ArrayList<>();
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
     Assert.assertEquals(windowPanes.size(), 1);
@@ -348,7 +348,7 @@
 
     List<WindowPane<Integer, Collection<IntegerEnvelope>>> windowPanes = new ArrayList<>();
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
     //assert that the count trigger fired
@@ -403,7 +403,7 @@
     List<WindowPane<Integer, Collection<IntegerEnvelope>>> windowPanes = new ArrayList<>();
 
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
 
     TestClock testClock = new TestClock();
     StreamOperatorTask task = new StreamOperatorTask(sgb, testClock);
@@ -446,7 +446,7 @@
     task.init(this.context);
 
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
     Assert.assertEquals(windowPanes.size(), 0);
 
     List<Integer> integerList = ImmutableList.of(1, 2, 1, 2, 1);
@@ -479,7 +479,7 @@
     task.init(this.context);
 
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
 
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
@@ -510,7 +510,7 @@
     task.init(this.context);
 
     MessageCollector messageCollector =
-        envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
+      envelope -> windowPanes.add((WindowPane<Integer, Collection<IntegerEnvelope>>) envelope.getMessage());
 
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
     task.processAsync(new IntegerEnvelope(1), messageCollector, taskCoordinator, taskCallback);
@@ -537,9 +537,9 @@
           .window(Windows.keyedTumblingWindow(KV::getKey, duration, new IntegerSerde(), kvSerde)
               .setEarlyTrigger(earlyTrigger).setAccumulationMode(mode), "w1")
           .sink((message, messageCollector, taskCoordinator) -> {
-              SystemStream outputSystemStream = new SystemStream("outputSystem", "outputStream");
-              messageCollector.send(new OutgoingMessageEnvelope(outputSystemStream, message));
-            });
+            SystemStream outputSystemStream = new SystemStream("outputSystem", "outputStream");
+            messageCollector.send(new OutgoingMessageEnvelope(outputSystemStream, message));
+          });
     };
 
     return new StreamApplicationDescriptorImpl(userApp, config);
@@ -555,9 +555,9 @@
           .window(Windows.tumblingWindow(duration, kvSerde).setEarlyTrigger(earlyTrigger)
               .setAccumulationMode(mode), "w1")
           .sink((message, messageCollector, taskCoordinator) -> {
-              SystemStream outputSystemStream = new SystemStream("outputSystem", "outputStream");
-              messageCollector.send(new OutgoingMessageEnvelope(outputSystemStream, message));
-            });
+            SystemStream outputSystemStream = new SystemStream("outputSystem", "outputStream");
+            messageCollector.send(new OutgoingMessageEnvelope(outputSystemStream, message));
+          });
     };
 
     return new StreamApplicationDescriptorImpl(userApp, config);
@@ -572,9 +572,9 @@
           .window(Windows.keyedSessionWindow(KV::getKey, duration, new IntegerSerde(), kvSerde)
               .setAccumulationMode(mode), "w1")
           .sink((message, messageCollector, taskCoordinator) -> {
-              SystemStream outputSystemStream = new SystemStream("outputSystem", "outputStream");
-              messageCollector.send(new OutgoingMessageEnvelope(outputSystemStream, message));
-            });
+            SystemStream outputSystemStream = new SystemStream("outputSystem", "outputStream");
+            messageCollector.send(new OutgoingMessageEnvelope(outputSystemStream, message));
+          });
     };
 
     return new StreamApplicationDescriptorImpl(userApp, config);
@@ -594,9 +594,9 @@
               .setEarlyTrigger(earlyTrigger)
               .setAccumulationMode(mode), "w1")
           .sink((message, messageCollector, taskCoordinator) -> {
-              SystemStream outputSystemStream = new SystemStream("outputSystem", "outputStream");
-              messageCollector.send(new OutgoingMessageEnvelope(outputSystemStream, message));
-            });
+            SystemStream outputSystemStream = new SystemStream("outputSystem", "outputStream");
+            messageCollector.send(new OutgoingMessageEnvelope(outputSystemStream, message));
+          });
     };
 
     return new StreamApplicationDescriptorImpl(userApp, config);
diff --git a/samza-core/src/test/java/org/apache/samza/operators/impl/store/TestTimeSeriesStoreImpl.java b/samza-core/src/test/java/org/apache/samza/operators/impl/store/TestTimeSeriesStoreImpl.java
index 94e171a..cabd6d3 100644
--- a/samza-core/src/test/java/org/apache/samza/operators/impl/store/TestTimeSeriesStoreImpl.java
+++ b/samza-core/src/test/java/org/apache/samza/operators/impl/store/TestTimeSeriesStoreImpl.java
@@ -98,15 +98,15 @@
     List<TimestampedValue<byte[]>> values = readStore(timeSeriesStore, "hello", 0L, 2L);
     Assert.assertEquals(100, values.size());
     values.forEach(timeSeriesValue -> {
-        Assert.assertEquals("world-1", new String(timeSeriesValue.getValue()));
-      });
+      Assert.assertEquals("world-1", new String(timeSeriesValue.getValue()));
+    });
 
     // read from time-range [2,4) should return 100 entries
     values = readStore(timeSeriesStore, "hello", 2L, 4L);
     Assert.assertEquals(100, values.size());
     values.forEach(timeSeriesValue -> {
-        Assert.assertEquals("world-2", new String(timeSeriesValue.getValue()));
-      });
+      Assert.assertEquals("world-2", new String(timeSeriesValue.getValue()));
+    });
 
     // read all entries in the store
     values = readStore(timeSeriesStore, "hello", 0L, Integer.MAX_VALUE);
diff --git a/samza-core/src/test/java/org/apache/samza/operators/spec/TestOperatorSpec.java b/samza-core/src/test/java/org/apache/samza/operators/spec/TestOperatorSpec.java
index 71c3486..e0a4f1a 100644
--- a/samza-core/src/test/java/org/apache/samza/operators/spec/TestOperatorSpec.java
+++ b/samza-core/src/test/java/org/apache/samza/operators/spec/TestOperatorSpec.java
@@ -164,7 +164,7 @@
   @Test
   public void testStreamOperatorSpecWithMap() {
     MapFunction<TestMessageEnvelope, TestOutputMessageEnvelope> mapFn =
-        m -> new TestOutputMessageEnvelope(m.getKey(), m.getMessage().hashCode());
+      m -> new TestOutputMessageEnvelope(m.getKey(), m.getMessage().hashCode());
     StreamOperatorSpec<TestMessageEnvelope, TestOutputMessageEnvelope> streamOperatorSpec =
         OperatorSpecs.createMapOperatorSpec(mapFn, "op0");
     StreamOperatorSpec<TestMessageEnvelope, TestOutputMessageEnvelope> cloneOperatorSpec =
@@ -381,7 +381,7 @@
     List<String> keys = new ArrayList<>(1);
     keys.add(0, "test-1");
     MapFunction<TestMessageEnvelope, TestOutputMessageEnvelope> mapFn =
-        m -> new TestOutputMessageEnvelope(keys.get(m.getKey().hashCode() % 1), integers.get(m.getMessage().hashCode() % 1));
+      m -> new TestOutputMessageEnvelope(keys.get(m.getKey().hashCode() % 1), integers.get(m.getMessage().hashCode() % 1));
     StreamOperatorSpec<TestMessageEnvelope, TestOutputMessageEnvelope> streamOperatorSpec =
         OperatorSpecs.createMapOperatorSpec(mapFn, "op0");
     StreamOperatorSpec<TestMessageEnvelope, TestOutputMessageEnvelope> cloneOperatorSpec =
diff --git a/samza-core/src/test/java/org/apache/samza/operators/spec/TestPartitionByOperatorSpec.java b/samza-core/src/test/java/org/apache/samza/operators/spec/TestPartitionByOperatorSpec.java
index 101e629..870f813 100644
--- a/samza-core/src/test/java/org/apache/samza/operators/spec/TestPartitionByOperatorSpec.java
+++ b/samza-core/src/test/java/org/apache/samza/operators/spec/TestPartitionByOperatorSpec.java
@@ -62,9 +62,9 @@
     MapFunction<Object, Object> valueFn = m -> m;
     KVSerde<Object, Object> partitionBySerde = KVSerde.of(new NoOpSerde<>(), new NoOpSerde<>());
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream inputStream = appDesc.getInputStream(testInputDescriptor);
-        inputStream.partitionBy(keyFn, valueFn, partitionBySerde, testRepartitionedStreamName);
-      }, getConfig());
+      MessageStream inputStream = appDesc.getInputStream(testInputDescriptor);
+      inputStream.partitionBy(keyFn, valueFn, partitionBySerde, testRepartitionedStreamName);
+    }, getConfig());
     assertEquals(2, streamAppDesc.getInputOperators().size());
     Map<String, InputOperatorSpec> inputOpSpecs = streamAppDesc.getInputOperators();
     assertTrue(inputOpSpecs.keySet().contains(String.format("%s-%s-partition_by-%s", testJobName, testJobId, testRepartitionedStreamName)));
@@ -91,9 +91,9 @@
     MapFunction<Object, String> keyFn = m -> m.toString();
     MapFunction<Object, Object> valueFn = m -> m;
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream inputStream = appDesc.getInputStream(testInputDescriptor);
-        inputStream.partitionBy(keyFn, valueFn, mock(KVSerde.class), testRepartitionedStreamName);
-      }, getConfig());
+      MessageStream inputStream = appDesc.getInputStream(testInputDescriptor);
+      inputStream.partitionBy(keyFn, valueFn, mock(KVSerde.class), testRepartitionedStreamName);
+    }, getConfig());
     InputOperatorSpec inputOpSpec = streamAppDesc.getInputOperators().get(
         String.format("%s-%s-partition_by-%s", testJobName, testJobId, testRepartitionedStreamName));
     assertNotNull(inputOpSpec);
@@ -116,9 +116,9 @@
   @Test
   public void testCopy() {
     StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream inputStream = appDesc.getInputStream(testInputDescriptor);
-        inputStream.partitionBy(m -> m.toString(), m -> m, mock(KVSerde.class), testRepartitionedStreamName);
-      }, getConfig());
+      MessageStream inputStream = appDesc.getInputStream(testInputDescriptor);
+      inputStream.partitionBy(m -> m.toString(), m -> m, mock(KVSerde.class), testRepartitionedStreamName);
+    }, getConfig());
     OperatorSpecGraph specGraph = streamAppDesc.getOperatorSpecGraph();
     OperatorSpecGraph clonedGraph = specGraph.clone();
     OperatorSpecTestUtils.assertClonedGraph(specGraph, clonedGraph);
@@ -128,36 +128,36 @@
   public void testScheduledFunctionAsKeyFn() {
     ScheduledMapFn keyFn = new ScheduledMapFn();
     new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<Object> inputStream = appDesc.getInputStream(testInputDescriptor);
-        inputStream.partitionBy(keyFn, m -> m, mock(KVSerde.class), "parByKey");
-      }, getConfig());
+      MessageStream<Object> inputStream = appDesc.getInputStream(testInputDescriptor);
+      inputStream.partitionBy(keyFn, m -> m, mock(KVSerde.class), "parByKey");
+    }, getConfig());
   }
 
   @Test(expected = IllegalArgumentException.class)
   public void testWatermarkFunctionAsKeyFn() {
     WatermarkMapFn keyFn = new WatermarkMapFn();
     new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<Object> inputStream = appDesc.getInputStream(testInputDescriptor);
-        inputStream.partitionBy(keyFn, m -> m, mock(KVSerde.class), "parByKey");
-      }, getConfig());
+      MessageStream<Object> inputStream = appDesc.getInputStream(testInputDescriptor);
+      inputStream.partitionBy(keyFn, m -> m, mock(KVSerde.class), "parByKey");
+    }, getConfig());
   }
 
   @Test(expected = IllegalArgumentException.class)
   public void testScheduledFunctionAsValueFn() {
     ScheduledMapFn valueFn = new ScheduledMapFn();
     new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<Object> inputStream = appDesc.getInputStream(testInputDescriptor);
-        inputStream.partitionBy(m -> m.toString(), valueFn, mock(KVSerde.class), "parByKey");
-      }, getConfig());
+      MessageStream<Object> inputStream = appDesc.getInputStream(testInputDescriptor);
+      inputStream.partitionBy(m -> m.toString(), valueFn, mock(KVSerde.class), "parByKey");
+    }, getConfig());
   }
 
   @Test(expected = IllegalArgumentException.class)
   public void testWatermarkFunctionAsValueFn() {
     WatermarkMapFn valueFn = new WatermarkMapFn();
     new StreamApplicationDescriptorImpl(appDesc -> {
-        MessageStream<Object> inputStream = appDesc.getInputStream(testInputDescriptor);
-        inputStream.partitionBy(m -> m.toString(), valueFn, mock(KVSerde.class), "parByKey");
-      }, getConfig());
+      MessageStream<Object> inputStream = appDesc.getInputStream(testInputDescriptor);
+      inputStream.partitionBy(m -> m.toString(), valueFn, mock(KVSerde.class), "parByKey");
+    }, getConfig());
   }
 
   private Config getConfig() {
diff --git a/samza-core/src/test/java/org/apache/samza/processor/TestStreamProcessor.java b/samza-core/src/test/java/org/apache/samza/processor/TestStreamProcessor.java
index 0978738..bb601ce 100644
--- a/samza-core/src/test/java/org/apache/samza/processor/TestStreamProcessor.java
+++ b/samza-core/src/test/java/org/apache/samza/processor/TestStreamProcessor.java
@@ -124,19 +124,17 @@
     SamzaContainer createSamzaContainer(String processorId, JobModel jobModel) {
       if (container == null) {
         RunLoop mockRunLoop = mock(RunLoop.class);
-        doAnswer(invocation ->
-          {
-            runLoopStartForMain.countDown();
-            containerStop.await();
-            Thread.sleep(this.runLoopShutdownDuration.toMillis());
-            return null;
-          }).when(mockRunLoop).run();
+        doAnswer(invocation -> {
+          runLoopStartForMain.countDown();
+          containerStop.await();
+          Thread.sleep(this.runLoopShutdownDuration.toMillis());
+          return null;
+        }).when(mockRunLoop).run();
 
-        Mockito.doAnswer(invocation ->
-          {
-            containerStop.countDown();
-            return null;
-          }).when(mockRunLoop).shutdown();
+        Mockito.doAnswer(invocation -> {
+          containerStop.countDown();
+          return null;
+        }).when(mockRunLoop).shutdown();
         container = StreamProcessorTestUtils.getDummyContainer(mockRunLoop, Mockito.mock(StreamTask.class));
       }
       return container;
@@ -198,29 +196,26 @@
         null);
 
     final CountDownLatch coordinatorStop = new CountDownLatch(1);
-    final Thread jcThread = new Thread(() ->
-      {
-        try {
-          processor.jobCoordinatorListener.onJobModelExpired();
-          processor.jobCoordinatorListener.onNewJobModel("1", getMockJobModel());
-          coordinatorStop.await();
-          processor.jobCoordinatorListener.onCoordinatorStop();
-        } catch (InterruptedException e) {
-          e.printStackTrace();
-        }
-      });
+    final Thread jcThread = new Thread(() -> {
+      try {
+        processor.jobCoordinatorListener.onJobModelExpired();
+        processor.jobCoordinatorListener.onNewJobModel("1", getMockJobModel());
+        coordinatorStop.await();
+        processor.jobCoordinatorListener.onCoordinatorStop();
+      } catch (InterruptedException e) {
+        e.printStackTrace();
+      }
+    });
 
-    doAnswer(invocation ->
-      {
-        coordinatorStop.countDown();
-        return null;
-      }).when(mockJobCoordinator).stop();
+    doAnswer(invocation -> {
+      coordinatorStop.countDown();
+      return null;
+    }).when(mockJobCoordinator).stop();
 
-    doAnswer(invocation ->
-      {
-        jcThread.start();
-        return null;
-      }).when(mockJobCoordinator).start();
+    doAnswer(invocation -> {
+      jcThread.start();
+      return null;
+    }).when(mockJobCoordinator).start();
 
     processor.start();
     processorListenerStart.await(10, TimeUnit.SECONDS);
@@ -277,29 +272,29 @@
         Duration.of(1, ChronoUnit.SECONDS));
 
     Thread jcThread = new Thread(() -> {
-        // gets processor into rebalance mode so onNewJobModel creates a new container
-        processor.jobCoordinatorListener.onJobModelExpired();
-        processor.jobCoordinatorListener.onNewJobModel("1", getMockJobModel());
-        try {
-          // wait for the run loop to be ready before triggering rebalance
-          processor.runLoopStartForMain.await();
-        } catch (InterruptedException e) {
-          e.printStackTrace();
-        }
-        processor.jobCoordinatorListener.onJobModelExpired();
-      });
+      // gets processor into rebalance mode so onNewJobModel creates a new container
+      processor.jobCoordinatorListener.onJobModelExpired();
+      processor.jobCoordinatorListener.onNewJobModel("1", getMockJobModel());
+      try {
+        // wait for the run loop to be ready before triggering rebalance
+        processor.runLoopStartForMain.await();
+      } catch (InterruptedException e) {
+        e.printStackTrace();
+      }
+      processor.jobCoordinatorListener.onJobModelExpired();
+    });
     doAnswer(invocation -> {
-        jcThread.start();
-        return null;
-      }).when(mockJobCoordinator).start();
+      jcThread.start();
+      return null;
+    }).when(mockJobCoordinator).start();
 
     // ensure that the coordinator stop occurred before checking the exception being thrown
     CountDownLatch coordinatorStop = new CountDownLatch(1);
     doAnswer(invocation -> {
-        processor.jobCoordinatorListener.onCoordinatorStop();
-        coordinatorStop.countDown();
-        return null;
-      }).when(mockJobCoordinator).stop();
+      processor.jobCoordinatorListener.onCoordinatorStop();
+      coordinatorStop.countDown();
+      return null;
+    }).when(mockJobCoordinator).stop();
 
     processor.start();
 
@@ -324,16 +319,15 @@
     AtomicReference<Throwable> actualThrowable = new AtomicReference<>();
     final CountDownLatch runLoopStartedLatch = new CountDownLatch(1);
     RunLoop failingRunLoop = mock(RunLoop.class);
-    doAnswer(invocation ->
-      {
-        try {
-          runLoopStartedLatch.countDown();
-          throw expectedThrowable;
-        } catch (InterruptedException ie) {
-          ie.printStackTrace();
-        }
-        return null;
-      }).when(failingRunLoop).run();
+    doAnswer(invocation -> {
+      try {
+        runLoopStartedLatch.countDown();
+        throw expectedThrowable;
+      } catch (InterruptedException ie) {
+        ie.printStackTrace();
+      }
+      return null;
+    }).when(failingRunLoop).run();
 
     SamzaContainer mockContainer = StreamProcessorTestUtils.getDummyContainer(failingRunLoop, mock(StreamTask.class));
     final CountDownLatch processorListenerFailed = new CountDownLatch(1);
@@ -369,27 +363,24 @@
         mockContainer);
 
     final CountDownLatch coordinatorStop = new CountDownLatch(1);
-    doAnswer(invocation ->
-      {
-        coordinatorStop.countDown();
-        return null;
-      }).when(mockJobCoordinator).stop();
+    doAnswer(invocation -> {
+      coordinatorStop.countDown();
+      return null;
+    }).when(mockJobCoordinator).stop();
 
-    doAnswer(invocation ->
-      {
-        new Thread(() ->
-          {
-            try {
-              processor.jobCoordinatorListener.onJobModelExpired();
-              processor.jobCoordinatorListener.onNewJobModel("1", getMockJobModel());
-              coordinatorStop.await();
-              processor.jobCoordinatorListener.onCoordinatorStop();
-            } catch (InterruptedException e) {
-              e.printStackTrace();
-            }
-          }).start();
-        return null;
-      }).when(mockJobCoordinator).start();
+    doAnswer(invocation -> {
+      new Thread(() -> {
+        try {
+          processor.jobCoordinatorListener.onJobModelExpired();
+          processor.jobCoordinatorListener.onNewJobModel("1", getMockJobModel());
+          coordinatorStop.await();
+          processor.jobCoordinatorListener.onCoordinatorStop();
+        } catch (InterruptedException e) {
+          e.printStackTrace();
+        }
+      }).start();
+      return null;
+    }).when(mockJobCoordinator).start();
 
     processor.start();
 
@@ -517,11 +508,11 @@
      */
 
     Mockito.when(executorService.shutdownNow()).thenAnswer(ctx -> {
-        if (!failContainerInterrupt) {
-          shutdownLatch.countDown();
-        }
-        return null;
-      });
+      if (!failContainerInterrupt) {
+        shutdownLatch.countDown();
+      }
+      return null;
+    });
     Mockito.when(executorService.isShutdown()).thenReturn(true);
 
     streamProcessor.state = State.IN_REBALANCE;
@@ -617,8 +608,8 @@
     AtomicReference<MockStreamProcessorLifecycleListener> mockListener = new AtomicReference<>();
     StreamProcessor streamProcessor =
         new StreamProcessor("TestProcessorId", mock(Config.class), new HashMap<>(), mock(TaskFactory.class),
-            Optional.empty(), Optional.empty(), Optional.empty(),
-            sp -> mockListener.updateAndGet(old -> new MockStreamProcessorLifecycleListener(sp)),
+            Optional.empty(), Optional.empty(), Optional.empty(), sp ->
+            mockListener.updateAndGet(old -> new MockStreamProcessorLifecycleListener(sp)),
             mock(JobCoordinator.class), Mockito.mock(MetadataStore.class));
     assertEquals(streamProcessor, mockListener.get().processor);
   }
diff --git a/samza-core/src/test/java/org/apache/samza/runtime/TestClusterBasedProcessorLifecycleListener.java b/samza-core/src/test/java/org/apache/samza/runtime/TestClusterBasedProcessorLifecycleListener.java
index 777929a..4bd41d0 100644
--- a/samza-core/src/test/java/org/apache/samza/runtime/TestClusterBasedProcessorLifecycleListener.java
+++ b/samza-core/src/test/java/org/apache/samza/runtime/TestClusterBasedProcessorLifecycleListener.java
@@ -78,10 +78,10 @@
   @Test
   public void testShutdownHookInvokesShutdownHookCallback() {
     doAnswer(invocation -> {
-        // Simulate call to container.shutdown()
-        clusterBasedProcessorLifecycleListener.afterStop();
-        return null;
-      }).when(mockShutdownHookCallback).run();
+      // Simulate call to container.shutdown()
+      clusterBasedProcessorLifecycleListener.afterStop();
+      return null;
+    }).when(mockShutdownHookCallback).run();
 
     // call beforeStart to setup shutdownHook
     clusterBasedProcessorLifecycleListener.beforeStart();
diff --git a/samza-core/src/test/java/org/apache/samza/runtime/TestLocalApplicationRunner.java b/samza-core/src/test/java/org/apache/samza/runtime/TestLocalApplicationRunner.java
index cef7906..344f082 100644
--- a/samza-core/src/test/java/org/apache/samza/runtime/TestLocalApplicationRunner.java
+++ b/samza-core/src/test/java/org/apache/samza/runtime/TestLocalApplicationRunner.java
@@ -118,13 +118,12 @@
     ArgumentCaptor<StreamProcessor.StreamProcessorLifecycleListenerFactory> captor =
         ArgumentCaptor.forClass(StreamProcessor.StreamProcessorLifecycleListenerFactory.class);
 
-    doAnswer(i ->
-      {
-        ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
-        listener.afterStart();
-        listener.afterStop();
-        return null;
-      }).when(sp).start();
+    doAnswer(i -> {
+      ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
+      listener.afterStart();
+      listener.afterStop();
+      return null;
+    }).when(sp).start();
 
     ExternalContext externalContext = mock(ExternalContext.class);
     doReturn(sp).when(runner)
@@ -157,13 +156,12 @@
     ArgumentCaptor<StreamProcessor.StreamProcessorLifecycleListenerFactory> captor =
         ArgumentCaptor.forClass(StreamProcessor.StreamProcessorLifecycleListenerFactory.class);
 
-    doAnswer(i ->
-      {
-        ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
-        listener.afterStart();
-        listener.afterStop();
-        return null;
-      }).when(sp).start();
+    doAnswer(i -> {
+      ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
+      listener.afterStart();
+      listener.afterStop();
+      return null;
+    }).when(sp).start();
 
     doReturn(sp).when(runner).createStreamProcessor(anyObject(), anyObject(),
         captor.capture(), eq(Optional.empty()), any(CoordinatorStreamStore.class));
@@ -195,13 +193,12 @@
     ArgumentCaptor<StreamProcessor.StreamProcessorLifecycleListenerFactory> captor =
         ArgumentCaptor.forClass(StreamProcessor.StreamProcessorLifecycleListenerFactory.class);
 
-    doAnswer(i ->
-      {
-        ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
-        listener.afterStart();
-        listener.afterStop();
-        return null;
-      }).when(sp).start();
+    doAnswer(i -> {
+      ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
+      listener.afterStart();
+      listener.afterStop();
+      return null;
+    }).when(sp).start();
 
     ExternalContext externalContext = mock(ExternalContext.class);
     doReturn(sp).when(runner)
@@ -233,13 +230,12 @@
     ArgumentCaptor<StreamProcessor.StreamProcessorLifecycleListenerFactory> captor =
         ArgumentCaptor.forClass(StreamProcessor.StreamProcessorLifecycleListenerFactory.class);
 
-    doAnswer(i ->
-      {
-        ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
-        listener.afterStart();
-        listener.afterStop();
-        return null;
-      }).when(sp).start();
+    doAnswer(i -> {
+      ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
+      listener.afterStart();
+      listener.afterStop();
+      return null;
+    }).when(sp).start();
 
     ExternalContext externalContext = mock(ExternalContext.class);
     doReturn(sp).when(runner)
@@ -269,10 +265,9 @@
     ArgumentCaptor<StreamProcessor.StreamProcessorLifecycleListenerFactory> captor =
         ArgumentCaptor.forClass(StreamProcessor.StreamProcessorLifecycleListenerFactory.class);
 
-    doAnswer(i ->
-      {
-        throw new Exception("test failure");
-      }).when(sp).start();
+    doAnswer(i -> {
+      throw new Exception("test failure");
+    }).when(sp).start();
 
     ExternalContext externalContext = mock(ExternalContext.class);
     doReturn(sp).when(runner)
@@ -310,19 +305,17 @@
     ArgumentCaptor<StreamProcessor.StreamProcessorLifecycleListenerFactory> captor =
         ArgumentCaptor.forClass(StreamProcessor.StreamProcessorLifecycleListenerFactory.class);
 
-    doAnswer(i ->
-      {
-        ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
-        listener.afterStart();
-        return null;
-      }).when(sp).start();
+    doAnswer(i -> {
+      ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
+      listener.afterStart();
+      return null;
+    }).when(sp).start();
 
-    doAnswer(i ->
-      {
-        ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
-        listener.afterStop();
-        return null;
-      }).when(sp).stop();
+    doAnswer(i -> {
+      ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
+      listener.afterStop();
+      return null;
+    }).when(sp).stop();
 
     ExternalContext externalContext = mock(ExternalContext.class);
     doReturn(sp).when(runner)
@@ -354,19 +347,17 @@
     ArgumentCaptor<StreamProcessor.StreamProcessorLifecycleListenerFactory> captor =
         ArgumentCaptor.forClass(StreamProcessor.StreamProcessorLifecycleListenerFactory.class);
 
-    doAnswer(i ->
-      {
-        ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
-        listener.afterStart();
-        return null;
-      }).when(sp).start();
+    doAnswer(i -> {
+      ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
+      listener.afterStart();
+      return null;
+    }).when(sp).start();
 
-    doAnswer(i ->
-      {
-        ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
-        listener.afterStop();
-        return null;
-      }).when(sp).stop();
+    doAnswer(i -> {
+      ProcessorLifecycleListener listener = captor.getValue().createInstance(sp);
+      listener.afterStop();
+      return null;
+    }).when(sp).stop();
 
     ExternalContext externalContext = mock(ExternalContext.class);
     doReturn(sp).when(runner)
diff --git a/samza-core/src/test/java/org/apache/samza/scheduler/TestEpochTimeScheduler.java b/samza-core/src/test/java/org/apache/samza/scheduler/TestEpochTimeScheduler.java
index 5db908c..008f2f6 100644
--- a/samza-core/src/test/java/org/apache/samza/scheduler/TestEpochTimeScheduler.java
+++ b/samza-core/src/test/java/org/apache/samza/scheduler/TestEpochTimeScheduler.java
@@ -21,6 +21,8 @@
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledFuture;
 import org.apache.samza.task.MessageCollector;
@@ -42,18 +44,137 @@
   private ScheduledExecutorService createExecutorService() {
     ScheduledExecutorService service = mock(ScheduledExecutorService.class);
     when(service.schedule((Runnable) anyObject(), anyLong(), anyObject())).thenAnswer(invocation -> {
-        Object[] args = invocation.getArguments();
-        Runnable runnable = (Runnable) args[0];
-        runnable.run();
-        return mock(ScheduledFuture.class);
-      });
+      Object[] args = invocation.getArguments();
+      Runnable runnable = (Runnable) args[0];
+      runnable.run();
+      return mock(ScheduledFuture.class);
+    });
     return service;
   }
 
   private void fireTimers(EpochTimeScheduler factory) {
     factory.removeReadyTimers().entrySet().forEach(entry -> {
-        entry.getValue().onCallback(entry.getKey().getKey(), mock(MessageCollector.class), mock(TaskCoordinator.class));
-      });
+      entry.getValue().onCallback(entry.getKey().getKey(), mock(MessageCollector.class), mock(TaskCoordinator.class));
+    });
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testDuplicateTimerWithCancelableCallback() {
+    final String timerKey = "timer-1";
+    ScheduledFuture mockScheduledFuture1 = mock(ScheduledFuture.class);
+    ScheduledFuture mockScheduledFuture2 = mock(ScheduledFuture.class);
+    ScheduledExecutorService executor = mock(ScheduledExecutorService.class);
+
+    when(mockScheduledFuture1.cancel(anyBoolean())).thenReturn(true);
+    when(executor.schedule((Runnable) anyObject(), anyLong(), anyObject()))
+        .thenReturn(mockScheduledFuture1)
+        .thenAnswer(invocation -> {
+          Object[] args = invocation.getArguments();
+          Runnable runnable = (Runnable) args[0];
+          runnable.run();
+          return mockScheduledFuture2;
+        });
+
+    EpochTimeScheduler scheduler = EpochTimeScheduler.create(executor);
+    long timestamp = System.currentTimeMillis() + 10000;
+
+    ScheduledCallback<String> expectedScheduledCallback = mock(ScheduledCallback.class);
+    scheduler.setTimer(timerKey, timestamp, mock(ScheduledCallback.class));
+    scheduler.setTimer(timerKey, timestamp, expectedScheduledCallback);
+
+    // verify the interactions with the scheduled future and the scheduler
+    verify(executor, times(2)).schedule((Runnable) anyObject(), anyLong(), anyObject());
+    verify(mockScheduledFuture1, times(1)).cancel(anyBoolean());
+
+    // verify the ready timer and its callback contents to ensure the second invocation callback overwrites the
+    // first callback
+    Set<Map.Entry<EpochTimeScheduler.TimerKey<?>, ScheduledCallback>> readyTimers =
+        scheduler.removeReadyTimers().entrySet();
+    assertEquals("Only one timer should be ready to be fired", readyTimers.size(), 1);
+
+    Map.Entry<EpochTimeScheduler.TimerKey<?>, ScheduledCallback> timerEntry = readyTimers.iterator().next();
+    assertEquals("Expected the scheduled callback from the second invocation",
+        timerEntry.getValue(),
+        expectedScheduledCallback);
+    assertEquals("Expected timer-1 as the key for ready timer",
+        timerEntry.getKey().getKey(),
+        timerKey);
+  }
+
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testDuplicateTimerWithUnsuccessfulCancellation() {
+    final String timerKey = "timer-1";
+    ScheduledFuture mockScheduledFuture1 = mock(ScheduledFuture.class);
+    ScheduledExecutorService executor = mock(ScheduledExecutorService.class);
+
+    when(mockScheduledFuture1.cancel(anyBoolean())).thenReturn(false);
+    when(mockScheduledFuture1.isDone()).thenReturn(false);
+    when(executor.schedule((Runnable) anyObject(), anyLong(), anyObject()))
+        .thenReturn(mockScheduledFuture1);
+
+    EpochTimeScheduler scheduler = EpochTimeScheduler.create(executor);
+    long timestamp = System.currentTimeMillis() + 10000;
+
+    scheduler.setTimer(timerKey, timestamp, mock(ScheduledCallback.class));
+    scheduler.setTimer(timerKey, timestamp, mock(ScheduledCallback.class));
+
+    // verify the interactions with the scheduled future and the scheduler
+    verify(executor, times(1)).schedule((Runnable) anyObject(), anyLong(), anyObject());
+    verify(mockScheduledFuture1, times(1)).cancel(anyBoolean());
+    verify(mockScheduledFuture1, times(1)).isDone();
+
+    Map<Object, ScheduledFuture> scheduledFutures = scheduler.getScheduledFutures();
+    assertTrue("Expected the timer to be in the queue", scheduledFutures.containsKey(timerKey));
+    assertEquals("Expected the scheduled callback from the first invocation",
+        scheduledFutures.get(timerKey),
+        mockScheduledFuture1);
+  }
+
+  @Test
+  public void testDuplicateTimerWithFinishedCallbacks() {
+    final String timerKey = "timer-1";
+    ScheduledFuture mockScheduledFuture1 = mock(ScheduledFuture.class);
+    ScheduledFuture mockScheduledFuture2 = mock(ScheduledFuture.class);
+    ScheduledExecutorService executor = mock(ScheduledExecutorService.class);
+
+    when(mockScheduledFuture1.cancel(anyBoolean())).thenReturn(false);
+    when(mockScheduledFuture1.isDone()).thenReturn(true);
+    when(executor.schedule((Runnable) anyObject(), anyLong(), anyObject()))
+        .thenReturn(mockScheduledFuture1)
+        .thenAnswer(invocation -> {
+          Object[] args = invocation.getArguments();
+          Runnable runnable = (Runnable) args[0];
+          runnable.run();
+          return mockScheduledFuture2;
+        });
+
+    EpochTimeScheduler scheduler = EpochTimeScheduler.create(executor);
+    long timestamp = System.currentTimeMillis() + 10000;
+
+    ScheduledCallback<String> expectedScheduledCallback = mock(ScheduledCallback.class);
+    scheduler.setTimer(timerKey, timestamp, mock(ScheduledCallback.class));
+    scheduler.setTimer(timerKey, timestamp, expectedScheduledCallback);
+
+    // verify the interactions with the scheduled future and the scheduler
+    verify(executor, times(2)).schedule((Runnable) anyObject(), anyLong(), anyObject());
+    verify(mockScheduledFuture1, times(1)).cancel(anyBoolean());
+    verify(mockScheduledFuture1, times(1)).isDone();
+
+    // verify the ready timer and its callback contents to ensure the second invocation callback overwrites the
+    // first callback
+    Set<Map.Entry<EpochTimeScheduler.TimerKey<?>, ScheduledCallback>> readyTimers =
+        scheduler.removeReadyTimers().entrySet();
+    assertEquals("Only one timer should be ready to be fired", readyTimers.size(), 1);
+
+    Map.Entry<EpochTimeScheduler.TimerKey<?>, ScheduledCallback> timerEntry = readyTimers.iterator().next();
+    assertEquals("Expected the scheduled callback from the second invocation",
+        timerEntry.getValue(),
+        expectedScheduledCallback);
+    assertEquals("Expected timer-1 as the key for ready timer",
+        timerEntry.getKey().getKey(),
+        timerKey);
   }
 
   @Test
@@ -61,8 +182,8 @@
     EpochTimeScheduler scheduler = EpochTimeScheduler.create(createExecutorService());
     List<String> results = new ArrayList<>();
     scheduler.setTimer("single-timer", 1, (key, collector, coordinator) -> {
-        results.add(key);
-      });
+      results.add(key);
+    });
 
     fireTimers(scheduler);
 
@@ -75,14 +196,14 @@
     EpochTimeScheduler scheduler = EpochTimeScheduler.create(createExecutorService());
     List<String> results = new ArrayList<>();
     scheduler.setTimer("multiple-timer-3", 3, (key, collector, coordinator) -> {
-        results.add(key + ":3");
-      });
+      results.add(key + ":3");
+    });
     scheduler.setTimer("multiple-timer-2", 2, (key, collector, coordinator) -> {
-        results.add(key + ":2");
-      });
+      results.add(key + ":2");
+    });
     scheduler.setTimer("multiple-timer-1", 1, (key, collector, coordinator) -> {
-        results.add(key + ":1");
-      });
+      results.add(key + ":1");
+    });
 
     fireTimers(scheduler);
 
@@ -100,13 +221,13 @@
 
     EpochTimeScheduler scheduler = EpochTimeScheduler.create(createExecutorService());
     scheduler.setTimer(key1, 2, (key, collector, coordinator) -> {
-        assertEquals(key, key1);
-        results.add("key1:2");
-      });
+      assertEquals(key, key1);
+      results.add("key1:2");
+    });
     scheduler.setTimer(key2, 1, (key, collector, coordinator) -> {
-        assertEquals(key, key2);
-        results.add("key2:1");
-      });
+      assertEquals(key, key2);
+      results.add("key2:1");
+    });
 
     fireTimers(scheduler);
 
@@ -123,13 +244,13 @@
 
     EpochTimeScheduler scheduler = EpochTimeScheduler.create(createExecutorService());
     scheduler.setTimer(key1, 1, (key, collector, coordinator) -> {
-        assertEquals(key, key1);
-        results.add("key:1");
-      });
+      assertEquals(key, key1);
+      results.add("key:1");
+    });
     scheduler.setTimer(key2, 2, (key, collector, coordinator) -> {
-        assertEquals(key.longValue(), Long.MAX_VALUE);
-        results.add(Long.MAX_VALUE + ":2");
-      });
+      assertEquals(key.longValue(), Long.MAX_VALUE);
+      results.add(Long.MAX_VALUE + ":2");
+    });
 
     fireTimers(scheduler);
 
@@ -148,8 +269,8 @@
     EpochTimeScheduler scheduler = EpochTimeScheduler.create(service);
     List<String> results = new ArrayList<>();
     scheduler.setTimer("timer", 1, (key, collector, coordinator) -> {
-        results.add(key);
-      });
+      results.add(key);
+    });
 
     scheduler.deleteTimer("timer");
 
@@ -164,11 +285,11 @@
     EpochTimeScheduler scheduler = EpochTimeScheduler.create(createExecutorService());
     List<String> results = new ArrayList<>();
     scheduler.registerListener(() -> {
-        results.add("timer-listener");
-      });
+      results.add("timer-listener");
+    });
 
     scheduler.setTimer("timer-listener", 1, (key, collector, coordinator) -> {
-      });
+    });
 
     fireTimers(scheduler);
 
diff --git a/samza-core/src/test/java/org/apache/samza/storage/TestTaskSideInputHandler.java b/samza-core/src/test/java/org/apache/samza/storage/TestTaskSideInputHandler.java
new file mode 100644
index 0000000..656b2ef
--- /dev/null
+++ b/samza-core/src/test/java/org/apache/samza/storage/TestTaskSideInputHandler.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.samza.storage;
+
+import java.io.File;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import org.apache.samza.Partition;
+import org.apache.samza.container.TaskName;
+import org.apache.samza.job.model.TaskMode;
+import org.apache.samza.system.StreamMetadataCache;
+import org.apache.samza.system.SystemAdmin;
+import org.apache.samza.system.SystemAdmins;
+import org.apache.samza.system.SystemStream;
+import org.apache.samza.system.SystemStreamMetadata;
+import org.apache.samza.system.SystemStreamPartition;
+import org.apache.samza.util.Clock;
+import org.apache.samza.util.ScalaJavaUtil;
+import org.junit.Assert;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.anyBoolean;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doCallRealMethod;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+
+
+public class TestTaskSideInputHandler {
+  private static final String TEST_SYSTEM = "test-system";
+  private static final String TEST_STORE = "test-store";
+  private static final String TEST_STREAM = "test-stream";
+
+  /**
+   * This test covers cases where calls to the systemAdmin's (e.g., KafkaSystemAdmin's) get-stream-metadata method return null.
+   */
+  @Test
+  public void testGetStartingOffsetsWhenStreamMetadataIsNull() {
+    final String taskName = "test-get-starting-offset-task";
+
+    Set<SystemStreamPartition> ssps = IntStream.range(1, 2)
+        .mapToObj(idx -> new SystemStreamPartition(TEST_SYSTEM, TEST_STREAM, new Partition(idx)))
+        .collect(Collectors.toSet());
+    Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> partitionMetadata = ssps.stream()
+        .collect(Collectors.toMap(SystemStreamPartition::getPartition,
+          x -> new SystemStreamMetadata.SystemStreamPartitionMetadata(null, "1", "2")));
+
+
+    TaskSideInputHandler handler = new MockTaskSideInputHandlerBuilder(taskName, TaskMode.Active)
+        .addStreamMetadata(Collections.singletonMap(new SystemStream(TEST_SYSTEM, TEST_STREAM),
+            new SystemStreamMetadata(TEST_STREAM, partitionMetadata)))
+        .addStore(TEST_STORE, ssps)
+        .build();
+
+    handler.init();
+
+    ssps.forEach(ssp -> {
+      String startingOffset = handler.getStartingOffset(
+          new SystemStreamPartition(TEST_SYSTEM, TEST_STREAM, ssp.getPartition()));
+      Assert.assertNull("Starting offset should be null", startingOffset);
+    });
+  }
+
+  @Test
+  public void testGetStartingOffsets() {
+    final String storeName = "test-get-starting-offset-store";
+    final String taskName = "test-get-starting-offset-task";
+
+    Set<SystemStreamPartition> ssps = IntStream.range(1, 6)
+        .mapToObj(idx -> new SystemStreamPartition(TEST_SYSTEM, TEST_STREAM, new Partition(idx)))
+        .collect(Collectors.toSet());
+
+
+    TaskSideInputHandler handler = new MockTaskSideInputHandlerBuilder(taskName, TaskMode.Active)
+        .addStore(storeName, ssps)
+        .build();
+
+    // Set up file and oldest offsets: for even partitions, fileOffsets will be larger; for odd partitions, oldestOffsets will be larger.
+    Map<SystemStreamPartition, String> fileOffsets = ssps.stream()
+        .collect(Collectors.toMap(Function.identity(), ssp -> {
+          int partitionId = ssp.getPartition().getPartitionId();
+          int offset = partitionId % 2 == 0 ? partitionId + 10 : partitionId;
+          return String.valueOf(offset);
+        }));
+    Map<SystemStreamPartition, String> oldestOffsets = ssps.stream()
+        .collect(Collectors.toMap(Function.identity(), ssp -> {
+          int partitionId = ssp.getPartition().getPartitionId();
+          int offset = partitionId % 2 == 0 ? partitionId : partitionId + 10;
+
+          return String.valueOf(offset);
+        }));
+
+    doCallRealMethod().when(handler).getStartingOffsets(fileOffsets, oldestOffsets);
+
+    Map<SystemStreamPartition, String> startingOffsets = handler.getStartingOffsets(fileOffsets, oldestOffsets);
+
+    assertTrue("Failed to get starting offsets for all ssps", startingOffsets.size() == 5);
+    startingOffsets.forEach((ssp, offset) -> {
+      int partitionId = ssp.getPartition().getPartitionId();
+      String expectedOffset = partitionId % 2 == 0
+          // 1 + fileOffset
+          ? getOffsetAfter(String.valueOf(ssp.getPartition().getPartitionId() + 10))
+          // oldestOffset
+          : String.valueOf(ssp.getPartition().getPartitionId() + 10);
+      assertEquals("Larger of fileOffsets and oldestOffsets should always be chosen", expectedOffset, offset);
+    });
+  }
+
+  private static final class MockTaskSideInputHandlerBuilder {
+    final TaskName taskName;
+    final TaskMode taskMode;
+    final File storeBaseDir;
+
+    final Map<String, StorageEngine> stores = new HashMap<>();
+    final Map<String, Set<SystemStreamPartition>> storeToSSPs = new HashMap<>();
+    final Clock clock = mock(Clock.class);
+    final Map<String, SideInputsProcessor> storeToProcessor = new HashMap<>();
+    final StreamMetadataCache streamMetadataCache = mock(StreamMetadataCache.class);
+    final SystemAdmins systemAdmins = mock(SystemAdmins.class);
+
+    public MockTaskSideInputHandlerBuilder(String taskName, TaskMode taskMode) {
+      this.taskName = new TaskName(taskName);
+      this.taskMode = taskMode;
+      this.storeBaseDir = mock(File.class);
+
+      initializeMocks();
+    }
+
+    private void initializeMocks() {
+      SystemAdmin admin = mock(SystemAdmin.class);
+      doAnswer(invocation -> {
+        String offset1 = invocation.getArgumentAt(0, String.class);
+        String offset2 = invocation.getArgumentAt(1, String.class);
+
+        return Long.compare(Long.parseLong(offset1), Long.parseLong(offset2));
+      }).when(admin).offsetComparator(any(), any());
+      doAnswer(invocation -> {
+        Map<SystemStreamPartition, String> sspToOffsets = invocation.getArgumentAt(0, Map.class);
+
+        return sspToOffsets.entrySet()
+            .stream()
+            .collect(Collectors.toMap(Map.Entry::getKey, entry -> getOffsetAfter(entry.getValue())));
+      }).when(admin).getOffsetsAfter(any());
+      doReturn(admin).when(systemAdmins).getSystemAdmin(TEST_SYSTEM);
+      doReturn(ScalaJavaUtil.toScalaMap(new HashMap<>())).when(streamMetadataCache).getStreamMetadata(any(), anyBoolean());
+    }
+
+
+    MockTaskSideInputHandlerBuilder addStreamMetadata(Map<SystemStream, SystemStreamMetadata> streamMetadata) {
+      doReturn(ScalaJavaUtil.toScalaMap(streamMetadata)).when(streamMetadataCache).getStreamMetadata(any(), anyBoolean());
+      return this;
+    }
+
+    MockTaskSideInputHandlerBuilder addStore(String storeName, Set<SystemStreamPartition> storeSSPs) {
+      storeToSSPs.put(storeName, storeSSPs);
+      storeToProcessor.put(storeName, mock(SideInputsProcessor.class));
+      return this;
+    }
+
+    TaskSideInputHandler build() {
+      return spy(new TaskSideInputHandler(taskName,
+          taskMode,
+          storeBaseDir,
+          stores,
+          storeToSSPs,
+          storeToProcessor,
+          systemAdmins,
+          streamMetadataCache,
+          clock));
+    }
+  }
+
+  private static String getOffsetAfter(String offset) {
+    return String.valueOf(Long.parseLong(offset) + 1);
+  }
+}
diff --git a/samza-core/src/test/java/org/apache/samza/storage/TestTaskSideInputStorageManager.java b/samza-core/src/test/java/org/apache/samza/storage/TestTaskSideInputStorageManager.java
index a7cefa0..0412154 100644
--- a/samza-core/src/test/java/org/apache/samza/storage/TestTaskSideInputStorageManager.java
+++ b/samza-core/src/test/java/org/apache/samza/storage/TestTaskSideInputStorageManager.java
@@ -19,8 +19,10 @@
 
 package org.apache.samza.storage;
 
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 import java.io.File;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
@@ -28,15 +30,10 @@
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 import org.apache.samza.Partition;
-import org.apache.samza.config.Config;
 import org.apache.samza.container.TaskName;
 import org.apache.samza.job.model.TaskMode;
-import org.apache.samza.system.StreamMetadataCache;
-import org.apache.samza.system.SystemAdmin;
-import org.apache.samza.system.SystemAdmins;
 import org.apache.samza.system.SystemStreamPartition;
 import org.apache.samza.util.Clock;
-import org.apache.samza.util.ScalaJavaUtil;
 import org.junit.Test;
 
 import static org.junit.Assert.*;
@@ -68,6 +65,7 @@
     final String taskName = "test-flush-task";
     final SystemStreamPartition ssp = new SystemStreamPartition("test-system", "test-stream", new Partition(0));
     final String offset = "123";
+    final ImmutableMap<SystemStreamPartition, String> processedOffsets = ImmutableMap.of(ssp, offset);
 
     TaskSideInputStorageManager testSideInputStorageManager = new MockTaskSideInputStorageManagerBuilder(taskName, LOGGED_STORE_DIR)
         .addLoggedStore(storeName, ImmutableSet.of(ssp))
@@ -75,14 +73,13 @@
     Map<String, StorageEngine> stores = new HashMap<>();
 
     initializeSideInputStorageManager(testSideInputStorageManager);
-    testSideInputStorageManager.updateLastProcessedOffset(ssp, offset);
-    testSideInputStorageManager.flush();
+    testSideInputStorageManager.flush(processedOffsets);
 
     for (StorageEngine storageEngine : stores.values()) {
       verify(storageEngine).flush();
     }
 
-    verify(testSideInputStorageManager).writeOffsetFiles();
+    verify(testSideInputStorageManager).writeFileOffsets(eq(processedOffsets));
 
     File storeDir = testSideInputStorageManager.getStoreLocation(storeName);
     assertTrue("Store directory: " + storeDir.getPath() + " is missing.", storeDir.exists());
@@ -96,16 +93,19 @@
   public void testStop() {
     final String storeName = "test-stop-store";
     final String taskName = "test-stop-task";
+    final SystemStreamPartition ssp = new SystemStreamPartition("test-system", "test-stream", new Partition(0));
+    final String offset = "123";
+    final ImmutableMap<SystemStreamPartition, String> processedOffsets = ImmutableMap.of(ssp, offset);
 
     TaskSideInputStorageManager testSideInputStorageManager = new MockTaskSideInputStorageManagerBuilder(taskName, NON_LOGGED_STORE_DIR)
         .addInMemoryStore(storeName, ImmutableSet.of())
         .build();
 
     initializeSideInputStorageManager(testSideInputStorageManager);
-    testSideInputStorageManager.stop();
+    testSideInputStorageManager.stop(processedOffsets);
 
     verify(testSideInputStorageManager.getStore(storeName)).stop();
-    verify(testSideInputStorageManager).writeOffsetFiles();
+    verify(testSideInputStorageManager).writeFileOffsets(eq(processedOffsets));
   }
 
   @Test
@@ -118,7 +118,7 @@
         .build();
 
     initializeSideInputStorageManager(testSideInputStorageManager);
-    testSideInputStorageManager.writeOffsetFiles(); // should be no-op
+    testSideInputStorageManager.writeFileOffsets(Collections.emptyMap()); // should be no-op
     File storeDir = testSideInputStorageManager.getStoreLocation(storeName);
 
     assertFalse("Store directory: " + storeDir.getPath() + " should not be created for non-persisted store", storeDir.exists());
@@ -134,15 +134,15 @@
     final SystemStreamPartition ssp = new SystemStreamPartition("test-system", "test-stream", new Partition(0));
     final SystemStreamPartition ssp2 = new SystemStreamPartition("test-system2", "test-stream2", new Partition(0));
 
+    Map<SystemStreamPartition, String> processedOffsets = ImmutableMap.of(ssp, offset, ssp2, offset);
+
     TaskSideInputStorageManager testSideInputStorageManager = new MockTaskSideInputStorageManagerBuilder(taskName, LOGGED_STORE_DIR)
         .addLoggedStore(storeName, ImmutableSet.of(ssp))
         .addLoggedStore(storeName2, ImmutableSet.of(ssp2))
         .build();
 
     initializeSideInputStorageManager(testSideInputStorageManager);
-    testSideInputStorageManager.updateLastProcessedOffset(ssp, offset);
-    testSideInputStorageManager.updateLastProcessedOffset(ssp2, offset);
-    testSideInputStorageManager.writeOffsetFiles();
+    testSideInputStorageManager.writeFileOffsets(processedOffsets);
     File storeDir = testSideInputStorageManager.getStoreLocation(storeName);
 
     assertTrue("Store directory: " + storeDir.getPath() + " is missing.", storeDir.exists());
@@ -171,57 +171,19 @@
         .build();
 
     initializeSideInputStorageManager(testSideInputStorageManager);
-    ssps.forEach(ssp -> testSideInputStorageManager.updateLastProcessedOffset(ssp, offset));
-    testSideInputStorageManager.writeOffsetFiles();
+    Map<SystemStreamPartition, String> processedOffsets = ssps.stream()
+        .collect(Collectors.toMap(Function.identity(), ssp -> offset));
+
+    testSideInputStorageManager.writeFileOffsets(processedOffsets);
 
     Map<SystemStreamPartition, String> fileOffsets = testSideInputStorageManager.getFileOffsets();
-
     ssps.forEach(ssp -> {
-        assertTrue("Failed to get offset for ssp: " + ssp.toString() + " from file.", fileOffsets.containsKey(ssp));
-        assertEquals("Mismatch between last processed offset and file offset.", fileOffsets.get(ssp), offset);
-      });
-  }
-
-  @Test
-  public void testGetStartingOffsets() {
-    final String storeName = "test-get-starting-offset-store";
-    final String taskName = "test-get-starting-offset-task";
-
-    Set<SystemStreamPartition> ssps = IntStream.range(1, 6)
-        .mapToObj(idx -> new SystemStreamPartition("test-system", "test-stream", new Partition(idx)))
-        .collect(Collectors.toSet());
-
-
-    TaskSideInputStorageManager testSideInputStorageManager = new MockTaskSideInputStorageManagerBuilder(taskName, LOGGED_STORE_DIR)
-        .addLoggedStore(storeName, ssps)
-        .build();
-
-    initializeSideInputStorageManager(testSideInputStorageManager);
-    Map<SystemStreamPartition, String> fileOffsets = ssps.stream()
-        .collect(Collectors.toMap(Function.identity(), ssp -> {
-            int partitionId = ssp.getPartition().getPartitionId();
-            int offset = partitionId % 2 == 0 ? partitionId + 10 : partitionId;
-            return String.valueOf(offset);
-          }));
-
-    Map<SystemStreamPartition, String> oldestOffsets = ssps.stream()
-        .collect(Collectors.toMap(Function.identity(), ssp -> {
-            int partitionId = ssp.getPartition().getPartitionId();
-            int offset = partitionId % 2 == 0 ? partitionId : partitionId + 10;
-
-            return String.valueOf(offset);
-          }));
-
-    doCallRealMethod().when(testSideInputStorageManager).getStartingOffsets(fileOffsets, oldestOffsets);
-
-    Map<SystemStreamPartition, String> startingOffsets =
-        testSideInputStorageManager.getStartingOffsets(fileOffsets, oldestOffsets);
-
-    assertTrue("Failed to get starting offsets for all ssps", startingOffsets.size() == 5);
+      assertTrue("Failed to get offset for ssp: " + ssp.toString() + " from file.", fileOffsets.containsKey(ssp));
+      assertEquals("Mismatch between last processed offset and file offset.", fileOffsets.get(ssp), offset);
+    });
   }
 
   private void initializeSideInputStorageManager(TaskSideInputStorageManager testSideInputStorageManager) {
-    doReturn(new HashMap<>()).when(testSideInputStorageManager).getStartingOffsets(any(), any());
     testSideInputStorageManager.init();
   }
 
@@ -229,39 +191,13 @@
     private final TaskName taskName;
     private final String storeBaseDir;
 
-    private Clock clock = mock(Clock.class);
-    private Map<String, SideInputsProcessor> storeToProcessor = new HashMap<>();
     private Map<String, StorageEngine> stores = new HashMap<>();
     private Map<String, Set<SystemStreamPartition>> storeToSSps = new HashMap<>();
-    private StreamMetadataCache streamMetadataCache = mock(StreamMetadataCache.class);
-    private SystemAdmins systemAdmins = mock(SystemAdmins.class);
+    private Clock clock = mock(Clock.class);
 
     public MockTaskSideInputStorageManagerBuilder(String taskName, String storeBaseDir) {
       this.taskName = new TaskName(taskName);
       this.storeBaseDir = storeBaseDir;
-
-      initializeMocks();
-    }
-
-    private void initializeMocks() {
-      SystemAdmin admin = mock(SystemAdmin.class);
-      doAnswer(invocation -> {
-          String offset1 = invocation.getArgumentAt(0, String.class);
-          String offset2 = invocation.getArgumentAt(1, String.class);
-
-          return Long.compare(Long.parseLong(offset1), Long.parseLong(offset2));
-        }).when(admin).offsetComparator(any(), any());
-      doAnswer(invocation -> {
-          Map<SystemStreamPartition, String> sspToOffsets = invocation.getArgumentAt(0, Map.class);
-
-          return sspToOffsets.entrySet()
-              .stream()
-              .collect(Collectors.toMap(Map.Entry::getKey,
-                  entry -> String.valueOf(Long.parseLong(entry.getValue()) + 1)));
-        }).when(admin).getOffsetsAfter(any());
-      doReturn(admin).when(systemAdmins).getSystemAdmin("test-system");
-
-      doReturn(ScalaJavaUtil.toScalaMap(new HashMap<>())).when(streamMetadataCache).getStreamMetadata(any(), anyBoolean());
     }
 
     MockTaskSideInputStorageManagerBuilder addInMemoryStore(String storeName, Set<SystemStreamPartition> ssps) {
@@ -270,7 +206,6 @@
           new StoreProperties.StorePropertiesBuilder().setLoggedStore(false).setPersistedToDisk(false).build());
 
       stores.put(storeName, storageEngine);
-      storeToProcessor.put(storeName, mock(SideInputsProcessor.class));
       storeToSSps.put(storeName, ssps);
 
       return this;
@@ -282,15 +217,14 @@
           new StoreProperties.StorePropertiesBuilder().setLoggedStore(false).setPersistedToDisk(true).build());
 
       stores.put(storeName, storageEngine);
-      storeToProcessor.put(storeName, mock(SideInputsProcessor.class));
       storeToSSps.put(storeName, ssps);
 
       return this;
     }
 
     TaskSideInputStorageManager build() {
-      return spy(new TaskSideInputStorageManager(taskName, TaskMode.Active, streamMetadataCache, new File(storeBaseDir), stores,
-          storeToProcessor, storeToSSps, systemAdmins, mock(Config.class), clock));
+      return spy(new TaskSideInputStorageManager(taskName, TaskMode.Active, new File(storeBaseDir), stores, storeToSSps,
+          clock));
     }
   }
 }
\ No newline at end of file
diff --git a/samza-core/src/test/java/org/apache/samza/storage/TestTransactionalStateTaskRestoreManager.java b/samza-core/src/test/java/org/apache/samza/storage/TestTransactionalStateTaskRestoreManager.java
index c37cca3..2bdd6c3 100644
--- a/samza-core/src/test/java/org/apache/samza/storage/TestTransactionalStateTaskRestoreManager.java
+++ b/samza-core/src/test/java/org/apache/samza/storage/TestTransactionalStateTaskRestoreManager.java
@@ -205,10 +205,10 @@
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     File dummyCurrentDir = new File("currentDir");
     File dummyCheckpointDir = new File("checkpointDir1");
@@ -273,10 +273,10 @@
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -339,10 +339,10 @@
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -406,10 +406,10 @@
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -474,10 +474,10 @@
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -546,10 +546,10 @@
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -610,10 +610,10 @@
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -673,10 +673,10 @@
         .thenReturn(mockCurrentStoreDir);
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -754,10 +754,10 @@
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -837,10 +837,10 @@
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -921,10 +921,10 @@
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -1006,10 +1006,10 @@
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -1091,10 +1091,10 @@
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -1185,10 +1185,10 @@
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -1280,10 +1280,10 @@
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -1367,13 +1367,13 @@
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            if (offset1 == null || offset2 == null) {
-              return -1;
-            }
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          if (offset1 == null || offset2 == null) {
+            return -1;
+          }
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -1462,13 +1462,13 @@
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            if (offset1 == null || offset2 == null) {
-              return -1;
-            }
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          if (offset1 == null || offset2 == null) {
+            return -1;
+          }
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -1555,10 +1555,10 @@
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -1646,10 +1646,10 @@
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -1736,10 +1736,10 @@
 
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
 
     StoreActions storeActions = TransactionalStateTaskRestoreManager.getStoreActions(
         mockTaskModel, mockStoreEngines, mockStoreChangelogs, mockCheckpointedChangelogOffset,
@@ -1897,17 +1897,17 @@
     when(mockSystemAdmins.getSystemAdmin(eq(changelogSystemName))).thenReturn(mockSystemAdmin);
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
     Mockito.when(mockSystemAdmin.getOffsetsAfter(any()))
         .thenAnswer((Answer<Map<SystemStreamPartition, String>>) invocation -> {
-            Map<SystemStreamPartition, String> offsets = (Map<SystemStreamPartition, String>) invocation.getArguments()[0];
-            Map<SystemStreamPartition, String> nextOffsets = new HashMap<>();
-            offsets.forEach((ssp, offset) -> nextOffsets.put(ssp, Long.toString(Long.valueOf(offset) + 1)));
-            return nextOffsets;
-          });
+          Map<SystemStreamPartition, String> offsets = (Map<SystemStreamPartition, String>) invocation.getArguments()[0];
+          Map<SystemStreamPartition, String> nextOffsets = new HashMap<>();
+          offsets.forEach((ssp, offset) -> nextOffsets.put(ssp, Long.toString(Long.valueOf(offset) + 1)));
+          return nextOffsets;
+        });
 
     SystemConsumer mockSystemConsumer = mock(SystemConsumer.class);
     Map<String, SystemConsumer> mockStoreConsumers = ImmutableMap.of(
@@ -1962,17 +1962,17 @@
     when(mockSystemAdmins.getSystemAdmin(eq(changelogSystemName))).thenReturn(mockSystemAdmin);
     Mockito.when(mockSystemAdmin.offsetComparator(anyString(), anyString()))
         .thenAnswer((Answer<Integer>) invocation -> {
-            String offset1 = (String) invocation.getArguments()[0];
-            String offset2 = (String) invocation.getArguments()[1];
-            return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
-          });
+          String offset1 = (String) invocation.getArguments()[0];
+          String offset2 = (String) invocation.getArguments()[1];
+          return Long.valueOf(offset1).compareTo(Long.valueOf(offset2));
+        });
     Mockito.when(mockSystemAdmin.getOffsetsAfter(any()))
         .thenAnswer((Answer<Map<SystemStreamPartition, String>>) invocation -> {
-            Map<SystemStreamPartition, String> offsets = (Map<SystemStreamPartition, String>) invocation.getArguments()[0];
-            Map<SystemStreamPartition, String> nextOffsets = new HashMap<>();
-            offsets.forEach((ssp, offset) -> nextOffsets.put(ssp, Long.toString(Long.valueOf(offset) + 1)));
-            return nextOffsets;
-          });
+          Map<SystemStreamPartition, String> offsets = (Map<SystemStreamPartition, String>) invocation.getArguments()[0];
+          Map<SystemStreamPartition, String> nextOffsets = new HashMap<>();
+          offsets.forEach((ssp, offset) -> nextOffsets.put(ssp, Long.toString(Long.valueOf(offset) + 1)));
+          return nextOffsets;
+        });
 
     SystemConsumer mockSystemConsumer = mock(SystemConsumer.class);
     Map<String, SystemConsumer> mockStoreConsumers = ImmutableMap.of("store1", mockSystemConsumer);
diff --git a/samza-core/src/test/java/org/apache/samza/system/MockSystemFactory.java b/samza-core/src/test/java/org/apache/samza/system/MockSystemFactory.java
index e3030ab..73cae4e 100644
--- a/samza-core/src/test/java/org/apache/samza/system/MockSystemFactory.java
+++ b/samza-core/src/test/java/org/apache/samza/system/MockSystemFactory.java
@@ -54,13 +54,13 @@
       public Map<SystemStreamPartition, List<IncomingMessageEnvelope>> poll(Set<SystemStreamPartition> systemStreamPartitions, long timeout) {
         Map<SystemStreamPartition, List<IncomingMessageEnvelope>> retQueues = new HashMap<>();
         systemStreamPartitions.forEach(ssp -> {
-            List<IncomingMessageEnvelope> msgs = MSG_QUEUES.get(ssp);
-            if (msgs == null) {
-              retQueues.put(ssp, new ArrayList<>());
-            } else {
-              retQueues.put(ssp, MSG_QUEUES.remove(ssp));
-            }
-          });
+          List<IncomingMessageEnvelope> msgs = MSG_QUEUES.get(ssp);
+          if (msgs == null) {
+            retQueues.put(ssp, new ArrayList<>());
+          } else {
+            retQueues.put(ssp, MSG_QUEUES.remove(ssp));
+          }
+        });
         return retQueues;
       }
     };
@@ -124,44 +124,46 @@
         Map<String, Set<Partition>> partitionMap = MSG_QUEUES.entrySet()
             .stream()
             .filter(entry -> streamNames.contains(entry.getKey().getSystemStream().getStream()))
-            .map(e -> e.getKey()).<Map<String, Set<Partition>>>collect(HashMap::new, (m, ssp) -> {
+            .map(e -> e.getKey())
+            .<Map<String, Set<Partition>>>collect(HashMap::new,
+              (m, ssp) -> {
                 if (m.get(ssp.getStream()) == null) {
                   m.put(ssp.getStream(), new HashSet<>());
                 }
                 m.get(ssp.getStream()).add(ssp.getPartition());
               }, (m1, m2) -> {
                 m2.forEach((k, v) -> {
-                    if (m1.get(k) == null) {
-                      m1.put(k, v);
-                    } else {
-                      m1.get(k).addAll(v);
-                    }
-                  });
+                  if (m1.get(k) == null) {
+                    m1.put(k, v);
+                  } else {
+                    m1.get(k).addAll(v);
+                  }
+                });
               });
 
         partitionMap.forEach((k, v) -> {
-            Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> partitionMetaMap =
-                v.stream().<Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata>>collect(HashMap::new,
-                  (m, p) -> {
-                    m.put(p, new SystemStreamMetadata.SystemStreamPartitionMetadata("", "", ""));
-                  }, (m1, m2) -> m1.putAll(m2));
+          Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> partitionMetaMap =
+              v.stream().<Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata>>collect(HashMap::new,
+                (m, p) -> {
+                  m.put(p, new SystemStreamMetadata.SystemStreamPartitionMetadata("", "", ""));
+                }, (m1, m2) -> m1.putAll(m2));
 
-            metadataMap.put(k, new SystemStreamMetadata(k, partitionMetaMap));
-          });
+          metadataMap.put(k, new SystemStreamMetadata(k, partitionMetaMap));
+        });
 
         return metadataMap;
       }
 
       @Override
       public Integer offsetComparator(String offset1, String offset2) {
-        return null;
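+        // compare offsets lexicographically so callers get a non-null, consistent ordering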
+        return offset1.compareTo(offset2);
       }
 
       @Override
       public Map<String, SystemStreamMetadata> getSystemStreamPartitionCounts(Set<String> streamNames, long cacheTTL) {
         return getSystemStreamMetadata(streamNames);
       }
-      
+
       @Override
       public boolean createStream(StreamSpec streamSpec) {
         return true;
diff --git a/samza-core/src/test/java/org/apache/samza/system/TestSSPMetadataCache.java b/samza-core/src/test/java/org/apache/samza/system/TestSSPMetadataCache.java
index efe09d1..f08d069 100644
--- a/samza-core/src/test/java/org/apache/samza/system/TestSSPMetadataCache.java
+++ b/samza-core/src/test/java/org/apache/samza/system/TestSSPMetadataCache.java
@@ -241,12 +241,12 @@
     SSPMetadataCache cache = buildSSPMetadataCache(ssps);
     ExecutorService executorService = Executors.newFixedThreadPool(10);
     when(systemAdmin.getSSPMetadata(ssps)).thenAnswer(invocation -> {
-        // have the admin call wait so that it forces the threads to overlap on the lock
-        Thread.sleep(500);
-        return IntStream.range(0, numPartitions)
-            .boxed()
-            .collect(Collectors.toMap(TestSSPMetadataCache::buildSSP, i -> sspMetadata((long) i)));
-      });
+      // have the admin call wait so that it forces the threads to overlap on the lock
+      Thread.sleep(500);
+      return IntStream.range(0, numPartitions)
+          .boxed()
+          .collect(Collectors.toMap(TestSSPMetadataCache::buildSSP, i -> sspMetadata((long) i)));
+    });
 
     // send concurrent requests for metadata
     List<Future<SystemStreamMetadata.SystemStreamPartitionMetadata>> getMetadataFutures =
diff --git a/samza-core/src/test/java/org/apache/samza/system/inmemory/TestInMemoryManager.java b/samza-core/src/test/java/org/apache/samza/system/inmemory/TestInMemoryManager.java
index 7a32483..890b669 100644
--- a/samza-core/src/test/java/org/apache/samza/system/inmemory/TestInMemoryManager.java
+++ b/samza-core/src/test/java/org/apache/samza/system/inmemory/TestInMemoryManager.java
@@ -26,6 +26,7 @@
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 import org.apache.samza.Partition;
+import org.apache.samza.system.EndOfStreamMessage;
 import org.apache.samza.system.IncomingMessageEnvelope;
 import org.apache.samza.system.StreamSpec;
 import org.apache.samza.system.SystemStreamMetadata;
@@ -77,9 +78,17 @@
         ImmutableMap.of(new Partition(0), new SystemStreamMetadata.SystemStreamPartitionMetadata("0", "1", "2")));
     SystemStreamMetadata systemStreamMetadata1 = new SystemStreamMetadata(STREAM1,
         ImmutableMap.of(new Partition(0), new SystemStreamMetadata.SystemStreamPartitionMetadata("0", "0", "1")));
+
     // also test a batch call for multiple streams here
     assertEquals(ImmutableMap.of(STREAM0, systemStreamMetadata0, STREAM1, systemStreamMetadata1),
         this.inMemoryManager.getSystemStreamMetadata(SYSTEM, ImmutableSet.of(STREAM0, STREAM1)));
+
+    // test that END_OF_STREAM doesn't alter the newest or upcoming offsets
+    this.inMemoryManager.put(ssp0, "key02", new EndOfStreamMessage());
+    systemStreamMetadata0 = new SystemStreamMetadata(STREAM0,
+        ImmutableMap.of(new Partition(0), new SystemStreamMetadata.SystemStreamPartitionMetadata("0", "1", "2")));
+    assertEquals(ImmutableMap.of(STREAM0, systemStreamMetadata0),
+        this.inMemoryManager.getSystemStreamMetadata(SYSTEM, ImmutableSet.of(STREAM0)));
   }
 
   @Test
diff --git a/samza-core/src/test/java/org/apache/samza/table/batching/TestBatchProcessor.java b/samza-core/src/test/java/org/apache/samza/table/batching/TestBatchProcessor.java
index 2de3170..44ea246 100644
--- a/samza-core/src/test/java/org/apache/samza/table/batching/TestBatchProcessor.java
+++ b/samza-core/src/test/java/org/apache/samza/table/batching/TestBatchProcessor.java
@@ -23,25 +23,21 @@
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.Executors;
 import java.util.function.Supplier;
 import org.apache.samza.table.ReadWriteTable;
 import org.junit.Assert;
 import org.junit.Test;
 
-import static java.lang.Thread.*;
-import static org.mockito.Mockito.*;
+import static java.lang.Thread.sleep;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyList;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
 
 public class TestBatchProcessor {
-  private static final int SLOW_OPERATION_TIME_MS = 500;
-  private static final Supplier<Void> SLOW_UPDATE_SUPPLIER = () -> {
-    try {
-      sleep(SLOW_OPERATION_TIME_MS);
-    } catch (InterruptedException e) {
-      // ignore
-    }
-    return null;
-  };
 
   public static class TestCreate {
     @Test
@@ -86,9 +82,18 @@
     @Test
     public void testBatchOperationTriggeredByBatchSize() {
       final int maxBatchSize = 3;
+      final CountDownLatch batchCompletionTriggerLatch = new CountDownLatch(1);
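+      // the mocked table update blocks on this latch so the test controls when the batch write completes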
+      final Supplier<Void> tableUpdateSupplier = () -> {
+        try {
+          batchCompletionTriggerLatch.await();
+        } catch (InterruptedException e) {
+          // ignore
+        }
+        return null;
+      };
 
       final ReadWriteTable<Integer, Integer> table = mock(ReadWriteTable.class);
-      when(table.putAllAsync(anyList())).thenReturn(CompletableFuture.supplyAsync(SLOW_UPDATE_SUPPLIER));
+      when(table.putAllAsync(anyList())).thenReturn(CompletableFuture.supplyAsync(tableUpdateSupplier));
 
       final BatchProcessor<Integer, Integer> batchProcessor =
           createBatchProcessor(table, maxBatchSize, Integer.MAX_VALUE);
@@ -104,15 +109,12 @@
       }
       Assert.assertEquals(0, batchProcessor.size());
 
-      try {
-        sleep(SLOW_OPERATION_TIME_MS * 2);
-      } catch (InterruptedException e) {
-        // ignore
-      }
-
-      for (int i = 0; i < maxBatchSize; i++) {
-        Assert.assertTrue(futureList.get(i).isDone());
-      }
+      // Complete the async call to the underlying table
+      batchCompletionTriggerLatch.countDown();
+      // The latch release should eventually complete the futures returned by the batch processor
+      CompletableFuture
+          .allOf(futureList.toArray(new CompletableFuture[0]))
+          .join();
     }
 
     @Test
diff --git a/samza-core/src/test/java/org/apache/samza/table/caching/TestCachingTable.java b/samza-core/src/test/java/org/apache/samza/table/caching/TestCachingTable.java
index e436a06..04656a5 100644
--- a/samza-core/src/test/java/org/apache/samza/table/caching/TestCachingTable.java
+++ b/samza-core/src/test/java/org/apache/samza/table/caching/TestCachingTable.java
@@ -118,21 +118,21 @@
     final ReadWriteTable cacheTable = mock(ReadWriteTable.class);
 
     doAnswer(invocation -> {
-        String key = invocation.getArgumentAt(0, String.class);
-        String value = invocation.getArgumentAt(1, String.class);
-        cacheStore.put(key, value);
-        return null;
-      }).when(cacheTable).put(any(), any());
+      String key = invocation.getArgumentAt(0, String.class);
+      String value = invocation.getArgumentAt(1, String.class);
+      cacheStore.put(key, value);
+      return null;
+    }).when(cacheTable).put(any(), any());
 
     doAnswer(invocation -> {
-        String key = invocation.getArgumentAt(0, String.class);
-        return cacheStore.get(key);
-      }).when(cacheTable).get(any());
+      String key = invocation.getArgumentAt(0, String.class);
+      return cacheStore.get(key);
+    }).when(cacheTable).get(any());
 
     doAnswer(invocation -> {
-        String key = invocation.getArgumentAt(0, String.class);
-        return cacheStore.remove(key);
-      }).when(cacheTable).delete(any());
+      String key = invocation.getArgumentAt(0, String.class);
+      return cacheStore.remove(key);
+    }).when(cacheTable).delete(any());
 
     return Pair.of(cacheTable, cacheStore);
   }
@@ -171,24 +171,24 @@
     final ReadWriteTable realTable = mock(ReadWriteTable.class);
 
     doAnswer(invocation -> {
-        String key = invocation.getArgumentAt(0, String.class);
-        return CompletableFuture.completedFuture("test-data-" + key);
-      }).when(realTable).getAsync(any());
+      String key = invocation.getArgumentAt(0, String.class);
+      return CompletableFuture.completedFuture("test-data-" + key);
+    }).when(realTable).getAsync(any());
 
     doReturn(CompletableFuture.completedFuture(null)).when(realTable).putAsync(any(), any());
 
     doAnswer(invocation -> {
-        String tableId = invocation.getArgumentAt(0, String.class);
-        if (tableId.equals("realTable")) {
-          // cache
-          return realTable;
-        } else if (tableId.equals("cacheTable")) {
-          return cacheTable;
-        }
+      String tableId = invocation.getArgumentAt(0, String.class);
+      if (tableId.equals("realTable")) {
+        // the real table that backs the cache
+        return realTable;
+      } else if (tableId.equals("cacheTable")) {
+        return cacheTable;
+      }
 
-        Assert.fail();
-        return null;
-      }).when(context.getTaskContext()).getTable(anyString());
+      Assert.fail();
+      return null;
+    }).when(context.getTaskContext()).getTable(anyString());
 
     when(context.getContainerContext().getContainerMetricsRegistry()).thenReturn(new NoOpMetricsRegistry());
 
diff --git a/samza-core/src/test/java/org/apache/samza/table/remote/TestRemoteTable.java b/samza-core/src/test/java/org/apache/samza/table/remote/TestRemoteTable.java
index 718aa2c..ed49cf8 100644
--- a/samza-core/src/test/java/org/apache/samza/table/remote/TestRemoteTable.java
+++ b/samza-core/src/test/java/org/apache/samza/table/remote/TestRemoteTable.java
@@ -194,9 +194,9 @@
 
     CompletableFuture.allOf(future1, future2)
         .thenAccept(u -> {
-            Assert.assertEquals(future1.join(), "bar1");
-            Assert.assertEquals(future2.join(), "bar1");
-          });
+          Assert.assertEquals(future1.join(), "bar1");
+          Assert.assertEquals(future2.join(), "bar1");
+        });
   }
 
   public void doTestRead(boolean sync, boolean error) {
@@ -556,10 +556,10 @@
     Thread testThread = Thread.currentThread();
 
     table.getAsync("foo").thenAccept(result -> {
-        Assert.assertEquals("bar", result);
-        // Must be executed on the executor thread
-        Assert.assertNotSame(testThread, Thread.currentThread());
-      });
+      Assert.assertEquals("bar", result);
+      // Must be executed on the executor thread
+      Assert.assertNotSame(testThread, Thread.currentThread());
+    });
   }
 
   @Test
diff --git a/samza-core/src/test/java/org/apache/samza/table/retry/TestAsyncRetriableTable.java b/samza-core/src/test/java/org/apache/samza/table/retry/TestAsyncRetriableTable.java
index ec4307d..f80f623 100644
--- a/samza-core/src/test/java/org/apache/samza/table/retry/TestAsyncRetriableTable.java
+++ b/samza-core/src/test/java/org/apache/samza/table/retry/TestAsyncRetriableTable.java
@@ -188,15 +188,15 @@
     map.put("foo1", "bar1");
     map.put("foo2", "bar2");
     doAnswer(invocation -> {
-        CompletableFuture<Map<String, String>> future = new CompletableFuture();
-        if (times.get() > 0) {
-          future.complete(map);
-        } else {
-          times.incrementAndGet();
-          future.completeExceptionally(new RuntimeException("test exception"));
-        }
-        return future;
-      }).when(readFn).getAllAsync(anyCollection());
+      CompletableFuture<Map<String, String>> future = new CompletableFuture();
+      if (times.get() > 0) {
+        future.complete(map);
+      } else {
+        times.incrementAndGet();
+        future.completeExceptionally(new RuntimeException("test exception"));
+      }
+      return future;
+    }).when(readFn).getAllAsync(anyCollection());
 
     AsyncReadWriteTable delegate = new AsyncRemoteTable(readFn, null);
     AsyncRetriableTable table = new AsyncRetriableTable("t1", delegate, policy, null, schedExec, readFn, null);
@@ -399,15 +399,15 @@
 
     AtomicInteger times = new AtomicInteger();
     doAnswer(invocation -> {
-        CompletableFuture<Map<String, String>> future = new CompletableFuture();
-        if (times.get() > 0) {
-          future.complete(null);
-        } else {
-          times.incrementAndGet();
-          future.completeExceptionally(new RuntimeException("test exception"));
-        }
-        return future;
-      }).when(writeFn).putAllAsync(any());
+      CompletableFuture<Map<String, String>> future = new CompletableFuture();
+      if (times.get() > 0) {
+        future.complete(null);
+      } else {
+        times.incrementAndGet();
+        future.completeExceptionally(new RuntimeException("test exception"));
+      }
+      return future;
+    }).when(writeFn).putAllAsync(any());
 
     AsyncReadWriteTable delegate = new AsyncRemoteTable(readFn, writeFn);
     AsyncRetriableTable table = new AsyncRetriableTable("t1", delegate, null, policy, schedExec, readFn, writeFn);
diff --git a/samza-core/src/test/java/org/apache/samza/task/TestStreamOperatorTask.java b/samza-core/src/test/java/org/apache/samza/task/TestStreamOperatorTask.java
index 330cab9..2d43c63 100644
--- a/samza-core/src/test/java/org/apache/samza/task/TestStreamOperatorTask.java
+++ b/samza-core/src/test/java/org/apache/samza/task/TestStreamOperatorTask.java
@@ -72,9 +72,9 @@
     CountDownLatch failureLatch = new CountDownLatch(1);
 
     doAnswer(ctx -> {
-        failureLatch.countDown();
-        return null;
-      }).when(mockTaskCallback).failure(anyObject());
+      failureLatch.countDown();
+      return null;
+    }).when(mockTaskCallback).failure(anyObject());
 
     operatorTask.processAsync(mock(IncomingMessageEnvelope.class), mockMessageCollector,
         mockTaskCoordinator, mockTaskCallback);
diff --git a/samza-core/src/test/java/org/apache/samza/util/TestDiagnosticsUtil.java b/samza-core/src/test/java/org/apache/samza/util/TestDiagnosticsUtil.java
new file mode 100644
index 0000000..f817c47
--- /dev/null
+++ b/samza-core/src/test/java/org/apache/samza/util/TestDiagnosticsUtil.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.samza.util;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.samza.config.Config;
+import org.apache.samza.config.JobConfig;
+import org.apache.samza.config.MapConfig;
+import org.apache.samza.config.MetricsConfig;
+import org.apache.samza.config.SystemConfig;
+import org.apache.samza.diagnostics.DiagnosticsManager;
+import org.apache.samza.job.model.JobModel;
+import org.apache.samza.metrics.MetricsRegistry;
+import org.apache.samza.metrics.MetricsReporterFactory;
+import org.apache.samza.metrics.reporter.MetricsSnapshotReporter;
+import org.apache.samza.system.SystemFactory;
+import org.apache.samza.system.SystemProducer;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.powermock.api.mockito.PowerMockito;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+
+import static org.mockito.Mockito.*;
+
+
+@RunWith(PowerMockRunner.class)
+@PrepareForTest({ReflectionUtil.class})
+public class TestDiagnosticsUtil {
+
+  private static final String STREAM_NAME = "someStreamName";
+  private static final String JOB_NAME = "someJob";
+  private static final String JOB_ID = "someId";
+  private static final String CONTAINER_ID = "someContainerId";
+  private static final String ENV_ID = "someEnvID";
+  public static final String REPORTER_FACTORY = "org.apache.samza.metrics.reporter.MetricsSnapshotReporterFactory";
+  public static final String SYSTEM_FACTORY = "com.foo.system.SomeSystemFactory";
+
+  @Test
+  public void testBuildDiagnosticsManagerReturnsConfiguredReporter() {
+    Config config = new MapConfig(buildTestConfigs());
+    JobModel mockJobModel = mock(JobModel.class);
+    SystemFactory systemFactory = mock(SystemFactory.class);
+    SystemProducer mockProducer = mock(SystemProducer.class);
+    MetricsReporterFactory metricsReporterFactory = mock(MetricsReporterFactory.class);
+    MetricsSnapshotReporter mockReporter = mock(MetricsSnapshotReporter.class);
+
+    when(systemFactory.getProducer(anyString(), any(Config.class), any(MetricsRegistry.class))).thenReturn(mockProducer);
+    when(metricsReporterFactory.getMetricsReporter(anyString(), anyString(), any(Config.class))).thenReturn(
+        mockReporter);
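+    // stub the static ReflectionUtil lookups so the configured factory class names resolve to the mocks above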
+    PowerMockito.mockStatic(ReflectionUtil.class);
+    when(ReflectionUtil.getObj(REPORTER_FACTORY, MetricsReporterFactory.class)).thenReturn(metricsReporterFactory);
+    when(ReflectionUtil.getObj(SYSTEM_FACTORY, SystemFactory.class)).thenReturn(systemFactory);
+
+    Optional<Pair<DiagnosticsManager, MetricsSnapshotReporter>> managerReporterPair =
+        DiagnosticsUtil.buildDiagnosticsManager(JOB_NAME, JOB_ID, mockJobModel, CONTAINER_ID, Optional.of(ENV_ID),
+            config);
+
+    Assert.assertTrue(managerReporterPair.isPresent());
+    Assert.assertEquals(mockReporter, managerReporterPair.get().getValue());
+  }
+
+  private Map<String, String> buildTestConfigs() {
+    Map<String, String> configs = new HashMap<>();
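+    // enable diagnostics and wire up the snapshot reporter factory, its stream, and that stream's system factory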
+    configs.put(JobConfig.JOB_DIAGNOSTICS_ENABLED, "true");
+    configs.put(String.format(MetricsConfig.METRICS_REPORTER_FACTORY,
+        MetricsConfig.METRICS_SNAPSHOT_REPORTER_NAME_FOR_DIAGNOSTICS), REPORTER_FACTORY);
+    configs.put(String.format(MetricsConfig.METRICS_SNAPSHOT_REPORTER_STREAM,
+        MetricsConfig.METRICS_SNAPSHOT_REPORTER_NAME_FOR_DIAGNOSTICS),
+        MetricsConfig.METRICS_SNAPSHOT_REPORTER_NAME_FOR_DIAGNOSTICS + "." + STREAM_NAME);
+    configs.put(String.format(SystemConfig.SYSTEM_FACTORY_FORMAT, MetricsConfig.METRICS_SNAPSHOT_REPORTER_NAME_FOR_DIAGNOSTICS),
+        SYSTEM_FACTORY);
+
+    return configs;
+  }
+}
diff --git a/samza-core/src/test/java/org/apache/samza/util/TestSplitDeploymentUtil.java b/samza-core/src/test/java/org/apache/samza/util/TestSplitDeploymentUtil.java
new file mode 100644
index 0000000..d1dd8f8
--- /dev/null
+++ b/samza-core/src/test/java/org/apache/samza/util/TestSplitDeploymentUtil.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.samza.util;
+
+import org.apache.samza.clustermanager.ClusterBasedJobCoordinator;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.powermock.api.mockito.PowerMockito;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+
+import static org.junit.Assert.*;
+import static org.mockito.AdditionalMatchers.*;
+import static org.mockito.Matchers.*;
+import static org.mockito.Mockito.verify;
+import static org.powermock.api.mockito.PowerMockito.*;
+
+
+@RunWith(PowerMockRunner.class)
+@PrepareForTest({ClusterBasedJobCoordinator.class})
+public class TestSplitDeploymentUtil {
+
+  @Test
+  public void testRunWithIsolatingClassLoader() throws Exception {
+    // partially mock ClusterBasedJobCoordinator (mock runClusterBasedJobCoordinator method only)
+    PowerMockito.spy(ClusterBasedJobCoordinator.class);
+    // save the context classloader to make sure that it gets set properly once the test is finished
+    ClassLoader previousContextClassLoader = Thread.currentThread().getContextClassLoader();
+    ClassLoader classLoader = mock(ClassLoader.class);
+    String[] args = new String[]{"arg0", "arg1"};
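+    // have the isolating classloader hand back the coordinator class when asked to load it by name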
+    doReturn(ClusterBasedJobCoordinator.class).when(classLoader).loadClass(ClusterBasedJobCoordinator.class.getName());
+
+    // stub the private static method which is called by reflection
+    PowerMockito.doAnswer(invocation -> {
+      // make sure the only call to this method has the expected arguments
+      assertArrayEquals(args, invocation.getArgumentAt(0, String[].class));
+      // check that the context classloader is set correctly
+      assertEquals(classLoader, Thread.currentThread().getContextClassLoader());
+      return null;
+    }).when(ClusterBasedJobCoordinator.class, "runClusterBasedJobCoordinator", any());
+
+    try {
+      SplitDeploymentUtil.runWithClassLoader(classLoader,
+          ClusterBasedJobCoordinator.class, "runClusterBasedJobCoordinator", args);
+      assertEquals(previousContextClassLoader, Thread.currentThread().getContextClassLoader());
+    } finally {
+      // reset it explicitly just in case runWithClassLoader throws an exception
+      Thread.currentThread().setContextClassLoader(previousContextClassLoader);
+    }
+    // make sure that the classloader got used
+    verify(classLoader).loadClass(ClusterBasedJobCoordinator.class.getName());
+    // make sure runClusterBasedJobCoordinator only got called once
+    verifyPrivate(ClusterBasedJobCoordinator.class).invoke("runClusterBasedJobCoordinator", new Object[]{aryEq(args)});
+  }
+}
diff --git a/samza-core/src/test/java/org/apache/samza/util/TestUtil.java b/samza-core/src/test/java/org/apache/samza/util/TestUtil.java
index 2eb2e89..4fbd8cb 100644
--- a/samza-core/src/test/java/org/apache/samza/util/TestUtil.java
+++ b/samza-core/src/test/java/org/apache/samza/util/TestUtil.java
@@ -47,11 +47,11 @@
   @Test
   public void testEnvVarEscape() {
     // no special characters in original
-    String noSpecialCharacters = "hello world 123 .?!";
+    String noSpecialCharacters = "hello world 123 .?! '";
     assertEquals(noSpecialCharacters, Util.envVarEscape(noSpecialCharacters));
 
-    String withSpecialCharacters = "quotation \" apostrophe '";
-    String escaped = "quotation \\\" apostrophe \\'";
+    String withSpecialCharacters = "quotation \" backslash \\ grave accent `";
+    String escaped = "quotation \\\" backslash \\\\ grave accent \\`";
     assertEquals(escaped, Util.envVarEscape(withSpecialCharacters));
   }
 
diff --git a/samza-core/src/test/java/org/apache/samza/zk/TestScheduleAfterDebounceTime.java b/samza-core/src/test/java/org/apache/samza/zk/TestScheduleAfterDebounceTime.java
index 67b2d45..c63e66b 100644
--- a/samza-core/src/test/java/org/apache/samza/zk/TestScheduleAfterDebounceTime.java
+++ b/samza-core/src/test/java/org/apache/samza/zk/TestScheduleAfterDebounceTime.java
@@ -59,9 +59,9 @@
 
     final TestObj testObj = new TestScheduleAfterDebounceTime.TestObj();
     scheduledQueue.scheduleAfterDebounceTime("TEST1", WAIT_TIME, () -> {
-        testObj.inc();
-        latch.countDown();
-      });
+      testObj.inc();
+      latch.countDown();
+    });
     // action is delayed
     Assert.assertEquals(0, testObj.get());
 
@@ -105,14 +105,13 @@
     final Throwable[] taskCallbackException = new Exception[1];
     ScheduleAfterDebounceTime scheduledQueue = new ScheduleAfterDebounceTime(TEST_PROCESSOR_ID);
     scheduledQueue.setScheduledTaskCallback(throwable -> {
-        taskCallbackException[0] = throwable;
-        latch.countDown();
-      });
+      taskCallbackException[0] = throwable;
+      latch.countDown();
+    });
 
-    scheduledQueue.scheduleAfterDebounceTime("TEST1", WAIT_TIME, () ->
-      {
-        throw new RuntimeException("From the runnable!");
-      });
+    scheduledQueue.scheduleAfterDebounceTime("TEST1", WAIT_TIME, () -> {
+      throw new RuntimeException("From the runnable!");
+    });
 
     final TestObj testObj = new TestObj();
     scheduledQueue.scheduleAfterDebounceTime("TEST2", WAIT_TIME * 2, testObj::inc);
diff --git a/samza-core/src/test/java/org/apache/samza/zk/TestZkUtils.java b/samza-core/src/test/java/org/apache/samza/zk/TestZkUtils.java
index d98392d..13fc13b 100644
--- a/samza-core/src/test/java/org/apache/samza/zk/TestZkUtils.java
+++ b/samza-core/src/test/java/org/apache/samza/zk/TestZkUtils.java
@@ -542,13 +542,13 @@
     ZkUtils zkUtils = new ZkUtils(KEY_BUILDER, zkClient, CONNECTION_TIMEOUT_MS, SESSION_TIMEOUT_MS, new NoOpMetricsRegistry());
 
     Thread threadToInterrupt = new Thread(() -> {
-        try {
-          latch.await();
-        } catch (InterruptedException e) {
-          Thread.currentThread().interrupt();
-        }
-        zkUtils.close();
-      });
+      try {
+        latch.await();
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+      }
+      zkUtils.close();
+    });
 
     threadToInterrupt.start();
 
diff --git a/samza-core/src/test/scala/org/apache/samza/container/TestTaskInstance.scala b/samza-core/src/test/scala/org/apache/samza/container/TestTaskInstance.scala
index 90f1b58..4cab185 100644
--- a/samza-core/src/test/scala/org/apache/samza/container/TestTaskInstance.scala
+++ b/samza-core/src/test/scala/org/apache/samza/container/TestTaskInstance.scala
@@ -21,6 +21,7 @@
 
 import java.util.Collections
 
+import com.google.common.collect.ImmutableSet
 import org.apache.samza.{Partition, SamzaException}
 import org.apache.samza.checkpoint.{Checkpoint, CheckpointedChangelogOffset, OffsetManager}
 import org.apache.samza.config.MapConfig
@@ -48,7 +49,7 @@
   private val TASK_NAME = new TaskName("taskName")
   private val SYSTEM_STREAM_PARTITION =
     new SystemStreamPartition(new SystemStream(SYSTEM_NAME, "test-stream"), new Partition(0))
-  private val SYSTEM_STREAM_PARTITIONS = Set(SYSTEM_STREAM_PARTITION)
+  private val SYSTEM_STREAM_PARTITIONS = ImmutableSet.of(SYSTEM_STREAM_PARTITION)
 
   @Mock
   private var task: AllTask = null
@@ -110,9 +111,12 @@
     when(this.offsetManager.getStartingOffset(TASK_NAME, SYSTEM_STREAM_PARTITION)).thenReturn(Some("0"))
     val envelope = new IncomingMessageEnvelope(SYSTEM_STREAM_PARTITION, "0", null, null)
     val coordinator = mock[ReadableCoordinator]
-    this.taskInstance.process(envelope, coordinator)
+    val callbackFactory = mock[TaskCallbackFactory]
+    val callback = mock[TaskCallback]
+    when(callbackFactory.createCallback()).thenReturn(callback)
+    this.taskInstance.process(envelope, coordinator, callbackFactory)
     assertEquals(1, this.taskInstanceExceptionHandler.numTimesCalled)
-    verify(this.task).process(envelope, this.collector, coordinator)
+    verify(this.task).processAsync(envelope, this.collector, coordinator, callback)
     verify(processesCounter).inc()
     verify(messagesActuallyProcessedCounter).inc()
   }
@@ -152,16 +156,6 @@
     verify(this.task).close()
   }
 
-  @Test
-  def testOffsetsAreUpdatedOnProcess() {
-    when(this.metrics.processes).thenReturn(mock[Counter])
-    when(this.metrics.messagesActuallyProcessed).thenReturn(mock[Counter])
-    when(this.offsetManager.getStartingOffset(TASK_NAME, SYSTEM_STREAM_PARTITION)).thenReturn(Some("2"))
-    this.taskInstance.process(new IncomingMessageEnvelope(SYSTEM_STREAM_PARTITION, "4", null, null),
-      mock[ReadableCoordinator])
-    verify(this.offsetManager).update(TASK_NAME, SYSTEM_STREAM_PARTITION, "4")
-  }
-
   /**
    * Tests that the init() method of task can override the existing offset assignment.
    * This helps verify wiring for the task context (i.e. offset manager).
@@ -199,12 +193,17 @@
     val newEnvelope0 = new IncomingMessageEnvelope(SYSTEM_STREAM_PARTITION, "5", null, null)
     val newEnvelope1 = new IncomingMessageEnvelope(SYSTEM_STREAM_PARTITION, "7", null, null)
 
-    this.taskInstance.process(oldEnvelope, mock[ReadableCoordinator])
-    this.taskInstance.process(newEnvelope0, mock[ReadableCoordinator])
-    this.taskInstance.process(newEnvelope1, mock[ReadableCoordinator])
-    verify(this.task).process(Matchers.eq(newEnvelope0), Matchers.eq(this.collector), any())
-    verify(this.task).process(Matchers.eq(newEnvelope1), Matchers.eq(this.collector), any())
-    verify(this.task, never()).process(Matchers.eq(oldEnvelope), any(), any())
+    val mockCoordinator = mock[ReadableCoordinator]
+    val mockCallback = mock[TaskCallback]
+    val mockCallbackFactory = mock[TaskCallbackFactory]
+    when(mockCallbackFactory.createCallback()).thenReturn(mockCallback)
+
+    this.taskInstance.process(oldEnvelope, mockCoordinator, mockCallbackFactory)
+    this.taskInstance.process(newEnvelope0, mockCoordinator, mockCallbackFactory)
+    this.taskInstance.process(newEnvelope1, mockCoordinator, mockCallbackFactory)
+    verify(this.task).processAsync(Matchers.eq(newEnvelope0), Matchers.eq(this.collector), Matchers.eq(mockCoordinator), Matchers.eq(mockCallback))
+    verify(this.task).processAsync(Matchers.eq(newEnvelope1), Matchers.eq(this.collector), Matchers.eq(mockCoordinator), Matchers.eq(mockCallback))
+    verify(this.task, never()).processAsync(Matchers.eq(oldEnvelope), any(), any(), any())
     verify(processesCounter, times(3)).inc()
     verify(messagesActuallyProcessedCounter, times(2)).inc()
   }
@@ -403,7 +402,7 @@
       offsetManager = offsetManagerMock,
       storageManager = this.taskStorageManager,
       tableManager = this.taskTableManager,
-      systemStreamPartitions = Set(ssp),
+      systemStreamPartitions = ImmutableSet.of(ssp),
       exceptionHandler = this.taskInstanceExceptionHandler,
       streamMetadataCache = cacheMock,
       inputStreamMetadata = Map.empty ++ inputStreamMetadata,
@@ -441,7 +440,7 @@
   /**
     * Task type which has all task traits, which can be mocked.
     */
-  trait AllTask extends StreamTask with InitableTask with ClosableTask with WindowableTask {}
+  trait AllTask extends AsyncStreamTask with InitableTask with ClosableTask with WindowableTask {}
 
   /**
     * Mock version of [TaskInstanceExceptionHandler] which just does a passthrough execution and keeps track of the
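The TestTaskInstance changes above track the test harness moving from the synchronous StreamTask.process(...) call to AsyncStreamTask.processAsync(...) driven by a TaskCallbackFactory. For orientation, here is a minimal sketch of a task written against that asynchronous contract; the class name, the worker pool, and the work inside the lambda are illustrative and not part of this patch.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.samza.system.IncomingMessageEnvelope;
import org.apache.samza.task.AsyncStreamTask;
import org.apache.samza.task.MessageCollector;
import org.apache.samza.task.TaskCallback;
import org.apache.samza.task.TaskCoordinator;

public class ExampleAsyncTask implements AsyncStreamTask {
  // Hypothetical worker pool; sizing is illustrative.
  private final ExecutorService executor = Executors.newFixedThreadPool(4);

  @Override
  public void processAsync(IncomingMessageEnvelope envelope, MessageCollector collector,
      TaskCoordinator coordinator, TaskCallback callback) {
    executor.submit(() -> {
      try {
        // Application-specific handling of envelope.getMessage() would go here.
        callback.complete();   // marks this envelope as successfully processed
      } catch (Exception e) {
        callback.failure(e);   // surfaces the error back to the container
      }
    });
  }
}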
diff --git a/samza-core/src/test/scala/org/apache/samza/storage/TestContainerStorageManager.java b/samza-core/src/test/scala/org/apache/samza/storage/TestContainerStorageManager.java
index bdc7e0e..c36a3be 100644
--- a/samza-core/src/test/scala/org/apache/samza/storage/TestContainerStorageManager.java
+++ b/samza-core/src/test/scala/org/apache/samza/storage/TestContainerStorageManager.java
@@ -87,8 +87,8 @@
   private void addMockedTask(String taskname, int changelogPartition) {
     TaskInstance mockTaskInstance = mock(TaskInstance.class);
     doAnswer(invocation -> {
-        return new TaskName(taskname);
-      }).when(mockTaskInstance).taskName();
+      return new TaskName(taskname);
+    }).when(mockTaskInstance).taskName();
 
     Gauge testGauge = mock(Gauge.class);
     this.tasks.put(new TaskName(taskname),
@@ -126,33 +126,33 @@
     when(mockStorageEngine.getStoreProperties())
         .thenReturn(new StoreProperties.StorePropertiesBuilder().setLoggedStore(true).setPersistedToDisk(true).build());
     doAnswer(invocation -> {
-        return mockStorageEngine;
-      }).when(mockStorageEngineFactory).getStorageEngine(anyString(), any(), any(), any(), any(),
-            any(), any(), any(), any(), any());
+      return mockStorageEngine;
+    }).when(mockStorageEngineFactory).getStorageEngine(anyString(), any(), any(), any(), any(),
+        any(), any(), any(), any(), any());
 
     storageEngineFactories.put(STORE_NAME, mockStorageEngineFactory);
 
     // Add instrumentation to mocked storage engine, to record the number of store.restore() calls
     doAnswer(invocation -> {
-        storeRestoreCallCount++;
-        return null;
-      }).when(mockStorageEngine).restore(any());
+      storeRestoreCallCount++;
+      return null;
+    }).when(mockStorageEngine).restore(any());
 
     // Set the mocked stores' properties to be persistent
     doAnswer(invocation -> {
-        return new StoreProperties.StorePropertiesBuilder().setLoggedStore(true).build();
-      }).when(mockStorageEngine).getStoreProperties();
+      return new StoreProperties.StorePropertiesBuilder().setLoggedStore(true).build();
+    }).when(mockStorageEngine).getStoreProperties();
 
     // Mock and setup sysconsumers
     SystemConsumer mockSystemConsumer = mock(SystemConsumer.class);
     doAnswer(invocation -> {
-        systemConsumerStartCount++;
-        return null;
-      }).when(mockSystemConsumer).start();
+      systemConsumerStartCount++;
+      return null;
+    }).when(mockSystemConsumer).start();
     doAnswer(invocation -> {
-        systemConsumerStopCount++;
-        return null;
-      }).when(mockSystemConsumer).stop();
+      systemConsumerStopCount++;
+      return null;
+    }).when(mockSystemConsumer).stop();
 
     // Create mocked system factories
     Map<String, SystemFactory> systemFactories = new HashMap<>();
@@ -160,9 +160,9 @@
     // Count the number of sysConsumers created
     SystemFactory mockSystemFactory = mock(SystemFactory.class);
     doAnswer(invocation -> {
-        this.systemConsumerCreationCount++;
-        return mockSystemConsumer;
-      }).when(mockSystemFactory).getConsumer(anyString(), any(), any());
+      this.systemConsumerCreationCount++;
+      return mockSystemConsumer;
+    }).when(mockSystemFactory).getConsumer(anyString(), any(), any());
 
     systemFactories.put(SYSTEM_NAME, mockSystemFactory);
 
diff --git a/samza-core/src/test/scala/org/apache/samza/util/TestCoordinatorStreamUtil.scala b/samza-core/src/test/scala/org/apache/samza/util/TestCoordinatorStreamUtil.scala
index f8a9f40..f520c6d 100644
--- a/samza-core/src/test/scala/org/apache/samza/util/TestCoordinatorStreamUtil.scala
+++ b/samza-core/src/test/scala/org/apache/samza/util/TestCoordinatorStreamUtil.scala
@@ -19,6 +19,7 @@
 package org.apache.samza.util
 
 import java.util
+import java.util.Collections
 
 import org.apache.samza.coordinator.metadatastore.CoordinatorStreamStore
 import org.apache.samza.coordinator.stream.CoordinatorStreamValueSerde
@@ -27,7 +28,8 @@
 import org.junit.{Assert, Test}
 import org.mockito.Matchers.any
 import org.mockito.Mockito
-import org.apache.samza.config.MapConfig
+import org.apache.samza.config.{JobConfig, MapConfig}
+import org.apache.samza.metadatastore.MetadataStore
 
 class TestCoordinatorStreamUtil {
 
@@ -99,4 +101,27 @@
 
     CoordinatorStreamUtil.writeConfigToCoordinatorStream(configMap)
   }
+
+  @Test
+  def testReadLaunchConfigFromCoordinatorStream() {
+    // Empty config when auto sizing is disabled.
+    Assert.assertEquals(new MapConfig(),  CoordinatorStreamUtil.readLaunchConfigFromCoordinatorStream(new MapConfig(), null))
+
+    val valueSerde = new CoordinatorStreamValueSerde(SetConfig.TYPE)
+    val config = new MapConfig(Collections.singletonMap(JobConfig.JOB_AUTOSIZING_ENABLED, "true"))
+    val expected = new MapConfig(Collections.singletonMap(JobConfig.JOB_AUTOSIZING_CONTAINER_COUNT, "20"))
+    val mockMetadataStore = Mockito.mock(classOf[MetadataStore])
+    val configMap = new util.HashMap[String, Array[Byte]]() {
+      put(CoordinatorStreamStore.serializeCoordinatorMessageKeyToJson(SetConfig.TYPE,
+        JobConfig.JOB_ID),
+        valueSerde.toBytes("321"))
+      put(CoordinatorStreamStore.serializeCoordinatorMessageKeyToJson(SetConfig.TYPE,
+        JobConfig.JOB_AUTOSIZING_CONTAINER_COUNT),
+        valueSerde.toBytes("20"))
+    }
+    Mockito.when(mockMetadataStore.all()).thenReturn(configMap)
+
+    // Verify the launch config is expected
+    Assert.assertEquals(expected, CoordinatorStreamUtil.readLaunchConfigFromCoordinatorStream(config, mockMetadataStore))
+  }
 }
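The new testReadLaunchConfigFromCoordinatorStream above covers reading auto-sizing overrides (for example the container count) back out of the coordinator stream, and returning an empty config when auto sizing is disabled. A hedged sketch of how a caller might consume that value follows; the helper class and the fallback behaviour are illustrative assumptions, and the Scala CoordinatorStreamUtil object is assumed to be reachable from Java through its static forwarder.

import org.apache.samza.config.Config;
import org.apache.samza.config.JobConfig;
import org.apache.samza.metadatastore.MetadataStore;
import org.apache.samza.util.CoordinatorStreamUtil;

class LaunchConfigExample {
  // Illustrative helper: returns the auto-sized container count from the coordinator
  // stream, or the supplied default when no override is present.
  static String containerCount(Config submitted, MetadataStore store, String defaultCount) {
    Config launch = CoordinatorStreamUtil.readLaunchConfigFromCoordinatorStream(submitted, store);
    String count = launch.get(JobConfig.JOB_AUTOSIZING_CONTAINER_COUNT);
    return count != null ? count : defaultCount;
  }
}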
diff --git a/samza-elasticsearch/src/main/java/org/apache/samza/config/ElasticsearchConfig.java b/samza-elasticsearch/src/main/java/org/apache/samza/config/ElasticsearchConfig.java
index b062e24..69d743d 100644
--- a/samza-elasticsearch/src/main/java/org/apache/samza/config/ElasticsearchConfig.java
+++ b/samza-elasticsearch/src/main/java/org/apache/samza/config/ElasticsearchConfig.java
@@ -67,12 +67,12 @@
 
   // Index Request
   public Optional<String> getIndexRequestFactoryClassName() {
-      return Optional.ofNullable(get(CONFIG_KEY_INDEX_REQUEST_FACTORY));
+    return Optional.ofNullable(get(CONFIG_KEY_INDEX_REQUEST_FACTORY));
   }
 
   // Transport client settings
   public Optional<String> getTransportHost() {
-      return Optional.ofNullable(get(CONFIG_KEY_CLIENT_TRANSPORT_HOST));
+    return Optional.ofNullable(get(CONFIG_KEY_CLIENT_TRANSPORT_HOST));
   }
 
   public Optional<Integer> getTransportPort() {
diff --git a/samza-elasticsearch/src/main/java/org/apache/samza/system/elasticsearch/ElasticsearchSystemAdmin.java b/samza-elasticsearch/src/main/java/org/apache/samza/system/elasticsearch/ElasticsearchSystemAdmin.java
index 3cadce0..bcf284d 100644
--- a/samza-elasticsearch/src/main/java/org/apache/samza/system/elasticsearch/ElasticsearchSystemAdmin.java
+++ b/samza-elasticsearch/src/main/java/org/apache/samza/system/elasticsearch/ElasticsearchSystemAdmin.java
@@ -32,14 +32,14 @@
  * <p>All the methods on this class return {@link UnsupportedOperationException}.</p>
  */
 public class ElasticsearchSystemAdmin implements SystemAdmin {
-  private static final SystemAdmin singleton = new ElasticsearchSystemAdmin();
+  private static final SystemAdmin SINGLETON = new ElasticsearchSystemAdmin();
 
   private ElasticsearchSystemAdmin() {
     // Ensure this can not be constructed.
   }
 
   public static SystemAdmin getInstance() {
-    return singleton;
+    return SINGLETON;
   }
 
   @Override
@@ -55,6 +55,6 @@
 
   @Override
   public Integer offsetComparator(String offset1, String offset2) {
-	  throw new UnsupportedOperationException();
+    throw new UnsupportedOperationException();
   }
 }
diff --git a/samza-elasticsearch/src/main/java/org/apache/samza/system/elasticsearch/ElasticsearchSystemProducerMetrics.java b/samza-elasticsearch/src/main/java/org/apache/samza/system/elasticsearch/ElasticsearchSystemProducerMetrics.java
index 5a46cba..cc7950e 100644
--- a/samza-elasticsearch/src/main/java/org/apache/samza/system/elasticsearch/ElasticsearchSystemProducerMetrics.java
+++ b/samza-elasticsearch/src/main/java/org/apache/samza/system/elasticsearch/ElasticsearchSystemProducerMetrics.java
@@ -23,17 +23,17 @@
 import org.apache.samza.metrics.MetricsRegistry;
 
 public class ElasticsearchSystemProducerMetrics extends MetricsBase {
-    public final Counter bulkSendSuccess;
-    public final Counter inserts;
-    public final Counter updates;
-    public final Counter conflicts;
+  public final Counter bulkSendSuccess;
+  public final Counter inserts;
+  public final Counter updates;
+  public final Counter conflicts;
 
-    public ElasticsearchSystemProducerMetrics(String systemName, MetricsRegistry registry) {
-        super(systemName + "-", registry);
+  public ElasticsearchSystemProducerMetrics(String systemName, MetricsRegistry registry) {
+    super(systemName + "-", registry);
 
-        bulkSendSuccess = newCounter("bulk-send-success");
-        inserts = newCounter("docs-inserted");
-        updates = newCounter("docs-updated");
-        conflicts = newCounter("version-conflicts");
-    }
+    bulkSendSuccess = newCounter("bulk-send-success");
+    inserts = newCounter("docs-inserted");
+    updates = newCounter("docs-updated");
+    conflicts = newCounter("version-conflicts");
+  }
 }
diff --git a/samza-elasticsearch/src/test/java/org/apache/samza/config/ElasticsearchConfigTest.java b/samza-elasticsearch/src/test/java/org/apache/samza/config/ElasticsearchConfigTest.java
index a5861ba..4493c3c 100644
--- a/samza-elasticsearch/src/test/java/org/apache/samza/config/ElasticsearchConfigTest.java
+++ b/samza-elasticsearch/src/test/java/org/apache/samza/config/ElasticsearchConfigTest.java
@@ -32,7 +32,7 @@
 
 public class ElasticsearchConfigTest {
 
-  private ElasticsearchConfig EMPTY_CONFIG = new ElasticsearchConfig(
+  private static final ElasticsearchConfig EMPTY_CONFIG = new ElasticsearchConfig(
       "es",
       new MapConfig(Collections.<String, String>emptyMap()));
 
diff --git a/samza-elasticsearch/src/test/java/org/apache/samza/system/elasticsearch/ElasticsearchSystemProducerMetricsTest.java b/samza-elasticsearch/src/test/java/org/apache/samza/system/elasticsearch/ElasticsearchSystemProducerMetricsTest.java
index 5554705..20aac55 100644
--- a/samza-elasticsearch/src/test/java/org/apache/samza/system/elasticsearch/ElasticsearchSystemProducerMetricsTest.java
+++ b/samza-elasticsearch/src/test/java/org/apache/samza/system/elasticsearch/ElasticsearchSystemProducerMetricsTest.java
@@ -29,27 +29,27 @@
 
 public class ElasticsearchSystemProducerMetricsTest {
 
-    public final static String GRP_NAME = "org.apache.samza.system.elasticsearch.ElasticsearchSystemProducerMetrics";
+  public final static String GRP_NAME = "org.apache.samza.system.elasticsearch.ElasticsearchSystemProducerMetrics";
 
-    @Test
-    public void testMetrics() {
-        ReadableMetricsRegistry registry = new MetricsRegistryMap();
-        ElasticsearchSystemProducerMetrics metrics = new ElasticsearchSystemProducerMetrics("es", registry);
-        metrics.bulkSendSuccess.inc(29L);
-        metrics.inserts.inc();
-        metrics.updates.inc(7L);
-        metrics.conflicts.inc(3L);
+  @Test
+  public void testMetrics() {
+    ReadableMetricsRegistry registry = new MetricsRegistryMap();
+    ElasticsearchSystemProducerMetrics metrics = new ElasticsearchSystemProducerMetrics("es", registry);
+    metrics.bulkSendSuccess.inc(29L);
+    metrics.inserts.inc();
+    metrics.updates.inc(7L);
+    metrics.conflicts.inc(3L);
 
-        Set<String> groups = registry.getGroups();
-        assertEquals(1, groups.size());
-        assertEquals(GRP_NAME, groups.toArray()[0]);
+    Set<String> groups = registry.getGroups();
+    assertEquals(1, groups.size());
+    assertEquals(GRP_NAME, groups.toArray()[0]);
 
-        Map<String, Metric> metricMap = registry.getGroup(GRP_NAME);
-        assertEquals(4, metricMap.size());
-        assertEquals(29L, ((Counter) metricMap.get("es-bulk-send-success")).getCount());
-        assertEquals(1L, ((Counter) metricMap.get("es-docs-inserted")).getCount());
-        assertEquals(7L, ((Counter) metricMap.get("es-docs-updated")).getCount());
-        assertEquals(3L, ((Counter) metricMap.get("es-version-conflicts")).getCount());
-    }
+    Map<String, Metric> metricMap = registry.getGroup(GRP_NAME);
+    assertEquals(4, metricMap.size());
+    assertEquals(29L, ((Counter) metricMap.get("es-bulk-send-success")).getCount());
+    assertEquals(1L, ((Counter) metricMap.get("es-docs-inserted")).getCount());
+    assertEquals(7L, ((Counter) metricMap.get("es-docs-updated")).getCount());
+    assertEquals(3L, ((Counter) metricMap.get("es-version-conflicts")).getCount());
+  }
 
 }
\ No newline at end of file
diff --git a/samza-elasticsearch/src/test/java/org/apache/samza/system/elasticsearch/ElasticsearchSystemProducerTest.java b/samza-elasticsearch/src/test/java/org/apache/samza/system/elasticsearch/ElasticsearchSystemProducerTest.java
index 992ef0a..58cca2a 100644
--- a/samza-elasticsearch/src/test/java/org/apache/samza/system/elasticsearch/ElasticsearchSystemProducerTest.java
+++ b/samza-elasticsearch/src/test/java/org/apache/samza/system/elasticsearch/ElasticsearchSystemProducerTest.java
@@ -108,7 +108,7 @@
     verify(processorTwo, never()).flush();
   }
 
-  @Test(expected=SamzaException.class)
+  @Test(expected = SamzaException.class)
   public void testFlushFailedSendFromException() throws Exception {
     ArgumentCaptor<BulkProcessor.Listener> listenerCaptor =
         ArgumentCaptor.forClass(BulkProcessor.Listener.class);
@@ -122,7 +122,7 @@
     producer.flush(SOURCE_ONE);
   }
 
-  @Test(expected=SamzaException.class)
+  @Test(expected = SamzaException.class)
   public void testFlushFailedSendFromFailedDocument() throws Exception {
     ArgumentCaptor<BulkProcessor.Listener> listenerCaptor =
         ArgumentCaptor.forClass(BulkProcessor.Listener.class);
diff --git a/samza-elasticsearch/src/test/java/org/apache/samza/system/elasticsearch/indexrequest/DefaultIndexRequestFactoryTest.java b/samza-elasticsearch/src/test/java/org/apache/samza/system/elasticsearch/indexrequest/DefaultIndexRequestFactoryTest.java
index 7eca108..a8d5baf 100644
--- a/samza-elasticsearch/src/test/java/org/apache/samza/system/elasticsearch/indexrequest/DefaultIndexRequestFactoryTest.java
+++ b/samza-elasticsearch/src/test/java/org/apache/samza/system/elasticsearch/indexrequest/DefaultIndexRequestFactoryTest.java
@@ -37,7 +37,7 @@
 
 public class DefaultIndexRequestFactoryTest {
 
-  private static final IndexRequestFactory indexRequestFactory = new DefaultIndexRequestFactory();
+  private static final IndexRequestFactory INDEX_REQUEST_FACTORY = new DefaultIndexRequestFactory();
   private static final String TYPE = "type";
   private static final String INDEX = "index";
   private static final SystemStream SYSTEM = mock(SystemStream.class);
@@ -50,23 +50,23 @@
 
   @Test
   public void testGetIndexRequestStreamName()  {
-    IndexRequest indexRequest = indexRequestFactory.
+    IndexRequest indexRequest = INDEX_REQUEST_FACTORY.
         getIndexRequest(new OutgoingMessageEnvelope(SYSTEM, EMPTY_MSG));
 
     assertEquals(INDEX, indexRequest.index());
     assertEquals(TYPE, indexRequest.type());
   }
 
-  @Test(expected=SamzaException.class)
+  @Test(expected = SamzaException.class)
   public void testGetIndexRequestInvalidStreamName()  {
     when(SYSTEM.getStream()).thenReturn(INDEX);
-    indexRequestFactory.getIndexRequest(new OutgoingMessageEnvelope(SYSTEM, EMPTY_MSG));
+    INDEX_REQUEST_FACTORY.getIndexRequest(new OutgoingMessageEnvelope(SYSTEM, EMPTY_MSG));
   }
 
   @Test
   public void testGetIndexRequestNoId() throws Exception {
     IndexRequest indexRequest =
-        indexRequestFactory.getIndexRequest(new OutgoingMessageEnvelope(SYSTEM, EMPTY_MSG));
+        INDEX_REQUEST_FACTORY.getIndexRequest(new OutgoingMessageEnvelope(SYSTEM, EMPTY_MSG));
 
     assertNull(indexRequest.id());
   }
@@ -74,14 +74,14 @@
   @Test
   public void testGetIndexRequestWithId() throws Exception {
     IndexRequest indexRequest =
-        indexRequestFactory.getIndexRequest(new OutgoingMessageEnvelope(SYSTEM, "id", EMPTY_MSG));
+        INDEX_REQUEST_FACTORY.getIndexRequest(new OutgoingMessageEnvelope(SYSTEM, "id", EMPTY_MSG));
 
     assertEquals("id", indexRequest.id());
   }
 
   @Test
   public void testGetIndexRequestNoPartitionKey() throws Exception {
-    IndexRequest indexRequest = indexRequestFactory.getIndexRequest(
+    IndexRequest indexRequest = INDEX_REQUEST_FACTORY.getIndexRequest(
         new OutgoingMessageEnvelope(SYSTEM, EMPTY_MSG));
 
     assertNull(indexRequest.routing());
@@ -89,7 +89,7 @@
 
   @Test
   public void testGetIndexRequestWithPartitionKey() throws Exception {
-    IndexRequest indexRequest = indexRequestFactory.getIndexRequest(
+    IndexRequest indexRequest = INDEX_REQUEST_FACTORY.getIndexRequest(
         new OutgoingMessageEnvelope(SYSTEM, "shardKey", "id", EMPTY_MSG));
 
     assertEquals("shardKey", indexRequest.routing());
@@ -97,7 +97,7 @@
 
   @Test
   public void testGetIndexRequestMessageBytes() throws Exception {
-    IndexRequest indexRequest = indexRequestFactory.getIndexRequest(
+    IndexRequest indexRequest = INDEX_REQUEST_FACTORY.getIndexRequest(
         new OutgoingMessageEnvelope(SYSTEM, "{\"foo\":\"bar\"}".getBytes(Charsets.UTF_8)));
 
     assertEquals(Collections.singletonMap("foo", "bar"), indexRequest.sourceAsMap());
@@ -105,14 +105,14 @@
 
   @Test
   public void testGetIndexRequestMessageMap() throws Exception {
-    IndexRequest indexRequest = indexRequestFactory.getIndexRequest(
+    IndexRequest indexRequest = INDEX_REQUEST_FACTORY.getIndexRequest(
         new OutgoingMessageEnvelope(SYSTEM, Collections.singletonMap("foo", "bar")));
 
     assertEquals(Collections.singletonMap("foo", "bar"), indexRequest.sourceAsMap());
   }
 
-  @Test(expected=SamzaException.class)
+  @Test(expected = SamzaException.class)
   public void testGetIndexRequestInvalidMessage() throws Exception {
-    indexRequestFactory.getIndexRequest(new OutgoingMessageEnvelope(SYSTEM, "{'foo':'bar'}"));
+    INDEX_REQUEST_FACTORY.getIndexRequest(new OutgoingMessageEnvelope(SYSTEM, "{'foo':'bar'}"));
   }
 }
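The DefaultIndexRequestFactory tests above pin down how the factory derives the index, type, document id, and routing key from an OutgoingMessageEnvelope. A small hypothetical sender is sketched below for orientation; the system name "es" and the "machine-logs/errors" stream are illustrative, with the "<index>/<type>" stream-name convention being the parsing these tests exercise.

import java.util.Collections;
import org.apache.samza.system.OutgoingMessageEnvelope;
import org.apache.samza.system.SystemStream;
import org.apache.samza.task.MessageCollector;

class EsSendExample {
  // Hypothetical helper, not part of this patch: sends one document to an
  // Elasticsearch-backed system, using the envelope key as the document id.
  static void sendDoc(MessageCollector collector) {
    collector.send(new OutgoingMessageEnvelope(
        new SystemStream("es", "machine-logs/errors"),    // "<index>/<type>" (assumed convention)
        "doc-42",                                          // key -> document id
        Collections.singletonMap("message", "disk full")));
  }
}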
diff --git a/samza-hdfs/src/main/java/org/apache/samza/system/hdfs/descriptors/HdfsSystemDescriptor.java b/samza-hdfs/src/main/java/org/apache/samza/system/hdfs/descriptors/HdfsSystemDescriptor.java
index fd63f79..49a9186 100644
--- a/samza-hdfs/src/main/java/org/apache/samza/system/hdfs/descriptors/HdfsSystemDescriptor.java
+++ b/samza-hdfs/src/main/java/org/apache/samza/system/hdfs/descriptors/HdfsSystemDescriptor.java
@@ -225,30 +225,28 @@
     Map<String, String> config = new HashMap<>(super.toConfig());
     String systemName = getSystemName();
 
-    datePathFormat.ifPresent(
-        val -> config.put(String.format(HdfsConfig.DATE_PATH_FORMAT_STRING(), systemName), val));
+    datePathFormat.ifPresent(val -> config.put(String.format(HdfsConfig.DATE_PATH_FORMAT_STRING(), systemName), val));
     outputBaseDir.ifPresent(val -> config.put(String.format(HdfsConfig.BASE_OUTPUT_DIR(), systemName), val));
     writeBatchSizeBytes.ifPresent(
-        val -> config.put(String.format(HdfsConfig.WRITE_BATCH_SIZE_BYTES(), systemName), String.valueOf(val)));
+      val -> config.put(String.format(HdfsConfig.WRITE_BATCH_SIZE_BYTES(), systemName), String.valueOf(val)));
     writeBatchSizeRecords.ifPresent(
-        val -> config.put(String.format(HdfsConfig.WRITE_BATCH_SIZE_RECORDS(), systemName), String.valueOf(val)));
-    writeCompressionType.ifPresent(
-        val -> config.put(String.format(HdfsConfig.COMPRESSION_TYPE(), systemName), val));
+      val -> config.put(String.format(HdfsConfig.WRITE_BATCH_SIZE_RECORDS(), systemName), String.valueOf(val)));
+    writeCompressionType.ifPresent(val -> config.put(String.format(HdfsConfig.COMPRESSION_TYPE(), systemName), val));
     writerClass.ifPresent(val -> config.put(String.format(HdfsConfig.HDFS_WRITER_CLASS_NAME(), systemName), val));
 
     consumerBufferCapacity.ifPresent(
-        val -> config.put(String.format(HdfsConfig.CONSUMER_BUFFER_CAPACITY(), systemName), String.valueOf(val)));
+      val -> config.put(String.format(HdfsConfig.CONSUMER_BUFFER_CAPACITY(), systemName), String.valueOf(val)));
     consumerMaxRetries.ifPresent(
-        val -> config.put(String.format(HdfsConfig.CONSUMER_NUM_MAX_RETRIES(), systemName), String.valueOf(val)));
+      val -> config.put(String.format(HdfsConfig.CONSUMER_NUM_MAX_RETRIES(), systemName), String.valueOf(val)));
     consumerWhiteList.ifPresent(
-        val -> config.put(String.format(HdfsConfig.CONSUMER_PARTITIONER_WHITELIST(), systemName), val));
+      val -> config.put(String.format(HdfsConfig.CONSUMER_PARTITIONER_WHITELIST(), systemName), val));
     consumerBlackList.ifPresent(
-        val -> config.put(String.format(HdfsConfig.CONSUMER_PARTITIONER_BLACKLIST(), systemName), val));
+      val -> config.put(String.format(HdfsConfig.CONSUMER_PARTITIONER_BLACKLIST(), systemName), val));
     consumerGroupPattern.ifPresent(
-        val -> config.put(String.format(HdfsConfig.CONSUMER_PARTITIONER_GROUP_PATTERN(), systemName), val));
+      val -> config.put(String.format(HdfsConfig.CONSUMER_PARTITIONER_GROUP_PATTERN(), systemName), val));
     consumerReader.ifPresent(val -> config.put(String.format(HdfsConfig.FILE_READER_TYPE(), systemName), val));
     consumerStagingDirectory.ifPresent(
-        val -> config.put(String.format(HdfsConfig.STAGING_DIRECTORY(), systemName), val));
+      val -> config.put(String.format(HdfsConfig.STAGING_DIRECTORY(), systemName), val));
 
     return config;
   }
diff --git a/samza-hdfs/src/test/java/org/apache/samza/system/hdfs/TestHdfsSystemConsumer.java b/samza-hdfs/src/test/java/org/apache/samza/system/hdfs/TestHdfsSystemConsumer.java
index 6cbf7ba..11e4d73 100644
--- a/samza-hdfs/src/test/java/org/apache/samza/system/hdfs/TestHdfsSystemConsumer.java
+++ b/samza-hdfs/src/test/java/org/apache/samza/system/hdfs/TestHdfsSystemConsumer.java
@@ -113,7 +113,7 @@
     while (eventsReceived < totalEvents && remainingRetries > 0) {
       remainingRetries--;
       Map<SystemStreamPartition, List<IncomingMessageEnvelope>> result = systemConsumer.poll(systemStreamPartitionSet, 1000);
-      for(SystemStreamPartition ssp : result.keySet()) {
+      for (SystemStreamPartition ssp : result.keySet()) {
         List<IncomingMessageEnvelope> messageEnvelopeList = result.get(ssp);
         overallResults.putIfAbsent(ssp, new ArrayList<>());
         overallResults.get(ssp).addAll(messageEnvelopeList);
@@ -127,7 +127,7 @@
     Assert.assertEquals(NUM_FILES, overallResults.size());
     overallResults.values().forEach(messages -> {
       Assert.assertEquals(NUM_EVENTS + 1, messages.size());
-      for (int index = 0;index < NUM_EVENTS; index++) {
+      for (int index = 0; index < NUM_EVENTS; index++) {
         GenericRecord record = (GenericRecord) messages.get(index).getMessage();
         Assert.assertEquals(index % NUM_EVENTS, record.get(FIELD_1));
         Assert.assertEquals("string_" + (index % NUM_EVENTS), record.get(FIELD_2).toString());
diff --git a/samza-hdfs/src/test/java/org/apache/samza/system/hdfs/partitioner/TestDirectoryPartitioner.java b/samza-hdfs/src/test/java/org/apache/samza/system/hdfs/partitioner/TestDirectoryPartitioner.java
index aea32ff..47cbfcd 100644
--- a/samza-hdfs/src/test/java/org/apache/samza/system/hdfs/partitioner/TestDirectoryPartitioner.java
+++ b/samza-hdfs/src/test/java/org/apache/samza/system/hdfs/partitioner/TestDirectoryPartitioner.java
@@ -70,7 +70,7 @@
   @Test
   public void testBasicWhiteListFiltering() {
     List<FileMetadata> testList = new ArrayList<>();
-    int NUM_INPUT = 9;
+    int numInput = 9;
     String[] inputFiles = {
       "part-001.avro",
       "part-002.avro",
@@ -82,27 +82,27 @@
       "delta-02.avro",
       "part-006.avro"};
     long[] fileLength = {150582, 138132, 214005, 205738, 158273, 982345, 313245, 234212, 413232};
-    for (int i = 0; i < NUM_INPUT; i++) {
+    for (int i = 0; i < numInput; i++) {
       testList.add(new FileMetadata(inputFiles[i], fileLength[i]));
     }
     String whiteList = "part-.*\\.avro";
     String blackList = "";
     String groupPattern = "";
-    int EXPECTED_NUM_PARTITION = 6;
-    int[][] EXPECTED_PARTITIONING = {{0}, {1}, {2}, {4}, {6}, {8}};
+    int expectedNumPartition = 6;
+    int[][] expectedPartitioning = {{0}, {1}, {2}, {4}, {6}, {8}};
 
     DirectoryPartitioner directoryPartitioner =
       new DirectoryPartitioner(whiteList, blackList, groupPattern, new TestFileSystemAdapter(testList));
     Map<Partition, SystemStreamPartitionMetadata> metadataMap = directoryPartitioner.getPartitionMetadataMap("hdfs", null);
-    Assert.assertEquals(EXPECTED_NUM_PARTITION, metadataMap.size());
+    Assert.assertEquals(expectedNumPartition, metadataMap.size());
     Map<Partition, List<String>> descriptorMap = directoryPartitioner.getPartitionDescriptor("hdfs");
-    verifyPartitionDescriptor(inputFiles, EXPECTED_PARTITIONING, EXPECTED_NUM_PARTITION, descriptorMap);
+    verifyPartitionDescriptor(inputFiles, expectedPartitioning, expectedNumPartition, descriptorMap);
   }
 
   @Test
   public void testBasicBlackListFiltering() {
     List<FileMetadata> testList = new ArrayList<>();
-    int NUM_INPUT = 9;
+    int numInput = 9;
     String[] inputFiles = {
       "part-001.avro",
       "part-002.avro",
@@ -114,27 +114,27 @@
       "delta-02.avro",
       "part-006.avro"};
     long[] fileLength = {150582, 138132, 214005, 205738, 158273, 982345, 313245, 234212, 413232};
-    for (int i = 0; i < NUM_INPUT; i++) {
+    for (int i = 0; i < numInput; i++) {
       testList.add(new FileMetadata(inputFiles[i], fileLength[i]));
     }
     String whiteList = ".*";
     String blackList = "delta-.*\\.avro";
     String groupPattern = "";
-    int EXPECTED_NUM_PARTITION = 6;
-    int[][] EXPECTED_PARTITIONING = {{0}, {1}, {2}, {4}, {6}, {8}};
+    int expectedNumPartition = 6;
+    int[][] expectedPartitioning = {{0}, {1}, {2}, {4}, {6}, {8}};
 
     DirectoryPartitioner directoryPartitioner =
       new DirectoryPartitioner(whiteList, blackList, groupPattern, new TestFileSystemAdapter(testList));
     Map<Partition, SystemStreamPartitionMetadata> metadataMap = directoryPartitioner.getPartitionMetadataMap("hdfs", null);
-    Assert.assertEquals(EXPECTED_NUM_PARTITION, metadataMap.size());
+    Assert.assertEquals(expectedNumPartition, metadataMap.size());
     Map<Partition, List<String>> descriporMap = directoryPartitioner.getPartitionDescriptor("hdfs");
-    verifyPartitionDescriptor(inputFiles, EXPECTED_PARTITIONING, EXPECTED_NUM_PARTITION, descriporMap);
+    verifyPartitionDescriptor(inputFiles, expectedPartitioning, expectedNumPartition, descriporMap);
   }
 
   @Test
   public void testWhiteListBlackListFiltering() {
     List<FileMetadata> testList = new ArrayList<>();
-    int NUM_INPUT = 9;
+    int numInput = 9;
     String[] inputFiles = {
       "part-001.avro",
       "part-002.avro",
@@ -146,27 +146,27 @@
       "delta-02.avro",
       "part-006.avro"};
     long[] fileLength = {150582, 138132, 214005, 205738, 158273, 982345, 313245, 234212, 413232};
-    for (int i = 0; i < NUM_INPUT; i++) {
+    for (int i = 0; i < numInput; i++) {
       testList.add(new FileMetadata(inputFiles[i], fileLength[i]));
     }
     String whiteList = "part-.*\\.avro";
     String blackList = "part-002.avro";
     String groupPattern = "";
-    int EXPECTED_NUM_PARTITION = 5;
-    int[][] EXPECTED_PARTITIONING = {{0}, {2}, {4}, {6}, {8}};
+    int expectedNumPartition = 5;
+    int[][] expectedPartitioning = {{0}, {2}, {4}, {6}, {8}};
 
     DirectoryPartitioner directoryPartitioner =
       new DirectoryPartitioner(whiteList, blackList, groupPattern, new TestFileSystemAdapter(testList));
     Map<Partition, SystemStreamPartitionMetadata> metadataMap = directoryPartitioner.getPartitionMetadataMap("hdfs", null);
-    Assert.assertEquals(EXPECTED_NUM_PARTITION, metadataMap.size());
+    Assert.assertEquals(expectedNumPartition, metadataMap.size());
     Map<Partition, List<String>> descriporMap = directoryPartitioner.getPartitionDescriptor("hdfs");
-    verifyPartitionDescriptor(inputFiles, EXPECTED_PARTITIONING, EXPECTED_NUM_PARTITION, descriporMap);
+    verifyPartitionDescriptor(inputFiles, expectedPartitioning, expectedNumPartition, descriporMap);
   }
 
   @Test
   public void testBasicGrouping() {
     List<FileMetadata> testList = new ArrayList<>();
-    int NUM_INPUT = 9;
+    int numInput = 9;
     String[] inputFiles = {
       "00_10-run_2016-08-15-13-04-part.0.150582.avro",
       "00_10-run_2016-08-15-13-04-part.1.138132.avro",
@@ -178,15 +178,15 @@
       "00_10-run_2016-08-15-13-06-part.1.234212.avro",
       "00_10-run_2016-08-15-13-06-part.2.413232.avro"};
     long[] fileLength = {150582, 138132, 214005, 205738, 158273, 982345, 313245, 234212, 413232};
-    for (int i = 0; i < NUM_INPUT; i++) {
+    for (int i = 0; i < numInput; i++) {
       testList.add(new FileMetadata(inputFiles[i], fileLength[i]));
     }
 
     String whiteList = ".*\\.avro";
     String blackList = "";
     String groupPattern = ".*part\\.[id]\\..*\\.avro"; // 00_10-run_2016-08-15-13-04-part.[id].138132.avro
-    int EXPECTED_NUM_PARTITION = 3;
-    int[][] EXPECTED_PARTITIONING = {
+    int expectedNumPartition = 3;
+    int[][] expectedPartitioning = {
       {0, 3, 6}, // files from index 0, 3, 6 should be grouped into one partition
       {1, 4, 7}, // similar as above
       {2, 5, 8}};
@@ -194,9 +194,9 @@
     DirectoryPartitioner directoryPartitioner =
       new DirectoryPartitioner(whiteList, blackList, groupPattern, new TestFileSystemAdapter(testList));
     Map<Partition, SystemStreamPartitionMetadata> metadataMap = directoryPartitioner.getPartitionMetadataMap("hdfs", null);
-    Assert.assertEquals(EXPECTED_NUM_PARTITION, metadataMap.size());
+    Assert.assertEquals(expectedNumPartition, metadataMap.size());
     Map<Partition, List<String>> descriporMap = directoryPartitioner.getPartitionDescriptor("hdfs");
-    verifyPartitionDescriptor(inputFiles, EXPECTED_PARTITIONING, EXPECTED_NUM_PARTITION, descriporMap);
+    verifyPartitionDescriptor(inputFiles, expectedPartitioning, expectedNumPartition, descriporMap);
   }
 
   @Test
@@ -204,7 +204,7 @@
     // the update is valid when there are only new files being added to the directory
     // no changes on the old files
     List<FileMetadata> testList = new ArrayList<>();
-    int NUM_INPUT = 6;
+    int numInput = 6;
     String[] inputFiles = {
       "part-001.avro",
       "part-002.avro",
@@ -213,23 +213,23 @@
       "part-004.avro",
       "part-006.avro"};
     long[] fileLength = {150582, 138132, 214005, 205738, 158273, 982345};
-    for (int i = 0; i < NUM_INPUT; i++) {
+    for (int i = 0; i < numInput; i++) {
       testList.add(new FileMetadata(inputFiles[i], fileLength[i]));
     }
     String whiteList = ".*";
     String blackList = "";
     String groupPattern = "";
-    int EXPECTED_NUM_PARTITION = 6;
-    int[][] EXPECTED_PARTITIONING = {{0}, {1}, {2}, {3}, {4}, {5}};
+    int expectedNumPartition = 6;
+    int[][] expectedPartitioning = {{0}, {1}, {2}, {3}, {4}, {5}};
 
     DirectoryPartitioner directoryPartitioner =
       new DirectoryPartitioner(whiteList, blackList, groupPattern, new TestFileSystemAdapter(testList));
     Map<Partition, SystemStreamPartitionMetadata> metadataMap = directoryPartitioner.getPartitionMetadataMap("hdfs", null);
-    Assert.assertEquals(EXPECTED_NUM_PARTITION, metadataMap.size());
+    Assert.assertEquals(expectedNumPartition, metadataMap.size());
     Map<Partition, List<String>> descriporMap = directoryPartitioner.getPartitionDescriptor("hdfs");
-    verifyPartitionDescriptor(inputFiles, EXPECTED_PARTITIONING, EXPECTED_NUM_PARTITION, descriporMap);
+    verifyPartitionDescriptor(inputFiles, expectedPartitioning, expectedNumPartition, descriporMap);
 
-    NUM_INPUT = 7;
+    numInput = 7;
     String[] updatedInputFiles = {
       "part-001.avro",
       "part-002.avro",
@@ -240,22 +240,22 @@
       "part-006.avro"};
     long[] updatedFileLength = {150582, 138132, 214005, 205738, 158273, 2513454, 982345};
     testList.clear();
-    for (int i = 0; i < NUM_INPUT; i++) {
+    for (int i = 0; i < numInput; i++) {
       testList.add(new FileMetadata(updatedInputFiles[i], updatedFileLength[i]));
     }
     directoryPartitioner =
       new DirectoryPartitioner(whiteList, blackList, groupPattern, new TestFileSystemAdapter(testList));
     metadataMap = directoryPartitioner.getPartitionMetadataMap("hdfs", descriporMap);
-    Assert.assertEquals(EXPECTED_NUM_PARTITION, metadataMap.size()); // still expect only 6 partitions instead of 7
+    Assert.assertEquals(expectedNumPartition, metadataMap.size()); // still expect only 6 partitions instead of 7
     Map<Partition, List<String>> updatedDescriptorMap = directoryPartitioner.getPartitionDescriptor("hdfs");
-    verifyPartitionDescriptor(inputFiles, EXPECTED_PARTITIONING, EXPECTED_NUM_PARTITION, updatedDescriptorMap);
+    verifyPartitionDescriptor(inputFiles, expectedPartitioning, expectedNumPartition, updatedDescriptorMap);
   }
 
   @Test
   public void testInvalidDirectoryUpdating() {
     // the update is invalid when at least one old file is removed
     List<FileMetadata> testList = new ArrayList<>();
-    int NUM_INPUT = 6;
+    int numInput = 6;
     String[] inputFiles = {
       "part-001.avro",
       "part-002.avro",
@@ -264,21 +264,21 @@
       "part-004.avro",
       "part-006.avro"};
     long[] fileLength = {150582, 138132, 214005, 205738, 158273, 982345};
-    for (int i = 0; i < NUM_INPUT; i++) {
+    for (int i = 0; i < numInput; i++) {
       testList.add(new FileMetadata(inputFiles[i], fileLength[i]));
     }
     String whiteList = ".*";
     String blackList = "";
     String groupPattern = "";
-    int EXPECTED_NUM_PARTITION = 6;
-    int[][] EXPECTED_PARTITIONING = {{0}, {1}, {2}, {3}, {4}, {5}};
+    int expectedNumPartition = 6;
+    int[][] expectedPartitioning = {{0}, {1}, {2}, {3}, {4}, {5}};
 
     DirectoryPartitioner directoryPartitioner =
       new DirectoryPartitioner(whiteList, blackList, groupPattern, new TestFileSystemAdapter(testList));
     Map<Partition, SystemStreamPartitionMetadata> metadataMap = directoryPartitioner.getPartitionMetadataMap("hdfs", null);
-    Assert.assertEquals(EXPECTED_NUM_PARTITION, metadataMap.size());
+    Assert.assertEquals(expectedNumPartition, metadataMap.size());
     Map<Partition, List<String>> descriporMap = directoryPartitioner.getPartitionDescriptor("hdfs");
-    verifyPartitionDescriptor(inputFiles, EXPECTED_PARTITIONING, EXPECTED_NUM_PARTITION, descriporMap);
+    verifyPartitionDescriptor(inputFiles, expectedPartitioning, expectedNumPartition, descriporMap);
 
     String[] updatedInputFiles = {
       "part-001.avro",
@@ -289,7 +289,7 @@
       "part-006.avro"};
     long[] updatedFileLength = {150582, 138132, 214005, 205738, 158273, 982345};
     testList.clear();
-    for (int i = 0; i < NUM_INPUT; i++) {
+    for (int i = 0; i < numInput; i++) {
       testList.add(new FileMetadata(updatedInputFiles[i], updatedFileLength[i]));
     }
     directoryPartitioner =
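The renames in TestDirectoryPartitioner above are cosmetic, but the tests also document the selection rules: a file feeds a partition only when it matches the whitelist pattern and does not match the blacklist pattern, and the optional group pattern collapses related files into a single partition. A minimal illustration of the whitelist/blacklist check follows; the class and method names are illustrative.

import java.util.regex.Pattern;

class PartitionFilterExample {
  // Illustrative only: mirrors the select-if-whitelisted-and-not-blacklisted rule
  // exercised above (e.g. whitelist "part-.*\\.avro" against blacklist "part-002.avro").
  static boolean selected(String fileName, Pattern whiteList, Pattern blackList) {
    return whiteList.matcher(fileName).matches() && !blackList.matcher(fileName).matches();
  }
}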
diff --git a/samza-hdfs/src/test/java/org/apache/samza/system/hdfs/reader/TestAvroFileHdfsReader.java b/samza-hdfs/src/test/java/org/apache/samza/system/hdfs/reader/TestAvroFileHdfsReader.java
index aa828d9..e8ebcb0 100644
--- a/samza-hdfs/src/test/java/org/apache/samza/system/hdfs/reader/TestAvroFileHdfsReader.java
+++ b/samza-hdfs/src/test/java/org/apache/samza/system/hdfs/reader/TestAvroFileHdfsReader.java
@@ -87,7 +87,7 @@
     SingleFileHdfsReader reader = new AvroFileHdfsReader(ssp);
     reader.open(AVRO_FILE, "0");
     int index = 0;
-    for (;index < NUM_EVENTS / 2; index++) {
+    for (; index < NUM_EVENTS / 2; index++) {
       GenericRecord record = (GenericRecord) reader.readNext().getMessage();
       Assert.assertEquals(index, record.get(FIELD_1));
       Assert.assertEquals("string_" + index, record.get(FIELD_2).toString());
@@ -96,7 +96,7 @@
     reader.close();
     reader = new AvroFileHdfsReader(ssp);
     reader.open(AVRO_FILE, offset);
-    for (;index < NUM_EVENTS; index++) {
+    for (; index < NUM_EVENTS; index++) {
       GenericRecord record = (GenericRecord) reader.readNext().getMessage();
       Assert.assertEquals(index, record.get(FIELD_1));
       Assert.assertEquals("string_" + index, record.get(FIELD_2).toString());
@@ -110,7 +110,7 @@
     SystemStreamPartition ssp = new SystemStreamPartition("hdfs", "testStream", new Partition(0));
     SingleFileHdfsReader reader = new AvroFileHdfsReader(ssp);
     reader.open(AVRO_FILE, "0");
-    for (int i = 0;i < NUM_EVENTS / 2; i++) {
+    for (int i = 0; i < NUM_EVENTS / 2; i++) {
       reader.readNext();
     }
     String offset = reader.nextOffset();
diff --git a/samza-kafka/src/main/java/org/apache/samza/checkpoint/kafka/KafkaCheckpointLogKeySerde.java b/samza-kafka/src/main/java/org/apache/samza/checkpoint/kafka/KafkaCheckpointLogKeySerde.java
index 8e0c815..cc771d3 100644
--- a/samza-kafka/src/main/java/org/apache/samza/checkpoint/kafka/KafkaCheckpointLogKeySerde.java
+++ b/samza-kafka/src/main/java/org/apache/samza/checkpoint/kafka/KafkaCheckpointLogKeySerde.java
@@ -39,12 +39,12 @@
   private static final String SSP_GROUPER_FACTORY_FIELD = "systemstreampartition-grouper-factory";
   private static final String TASK_NAME_FIELD = "taskName";
   private static final String TYPE_FIELD = "type";
-  private static final ObjectMapper mapper = new ObjectMapper();
+  private static final ObjectMapper MAPPER = new ObjectMapper();
 
   @Override
   public byte[] toBytes(KafkaCheckpointLogKey key) {
     try {
-      return mapper.writeValueAsBytes(ImmutableMap.of(
+      return MAPPER.writeValueAsBytes(ImmutableMap.of(
           SSP_GROUPER_FACTORY_FIELD, key.getGrouperFactoryClassName(),
           TASK_NAME_FIELD, key.getTaskName().toString(),
           TYPE_FIELD, key.getType()
@@ -57,7 +57,7 @@
   @Override
   public KafkaCheckpointLogKey fromBytes(byte[] bytes) {
     try {
-      LinkedHashMap<String, String> deserializedKey = mapper.readValue(bytes, LinkedHashMap.class);
+      LinkedHashMap<String, String> deserializedKey = MAPPER.readValue(bytes, LinkedHashMap.class);
 
       if (!KafkaCheckpointLogKey.CHECKPOINT_KEY_TYPE.equals(deserializedKey.get(TYPE_FIELD))) {
         throw new IllegalArgumentException(String.format("Invalid key detected. Type of the key is %s", deserializedKey.get(TYPE_FIELD)));
diff --git a/samza-kafka/src/main/java/org/apache/samza/config/KafkaConsumerConfig.java b/samza-kafka/src/main/java/org/apache/samza/config/KafkaConsumerConfig.java
index 3ac84df..d7464c6 100644
--- a/samza-kafka/src/main/java/org/apache/samza/config/KafkaConsumerConfig.java
+++ b/samza-kafka/src/main/java/org/apache/samza/config/KafkaConsumerConfig.java
@@ -1,5 +1,4 @@
 /*
- *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -16,14 +15,11 @@
  * KIND, either express or implied.  See the License for the
  * specific language governing permissions and limitations
  * under the License.
- *
  */
-
 package org.apache.samza.config;
 
 import java.util.HashMap;
 import java.util.Map;
-import java.util.Optional;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.tuple.ImmutablePair;
 import org.apache.commons.lang3.tuple.Pair;
@@ -128,7 +124,7 @@
   }
 
   public int fetchMessageMaxBytes() {
-    String fetchSize = (String)get("fetch.message.max.bytes");
+    String fetchSize = (String) get("fetch.message.max.bytes");
     if (StringUtils.isBlank(fetchSize)) {
       return FETCH_MAX_BYTES;
     } else  {
@@ -177,26 +173,26 @@
    */
   static String getAutoOffsetResetValue(final String autoOffsetReset, final String samzaOffsetDefault) {
     // valid kafka consumer values
-    final String KAFKA_OFFSET_LATEST = "latest";
-    final String KAFKA_OFFSET_EARLIEST = "earliest";
-    final String KAFKA_OFFSET_NONE = "none";
+    final String kafkaOffsetLatest = "latest";
+    final String kafkaOffsetEarliest = "earliest";
+    final String kafkaOffsetNone = "none";
 
     // if the value for KafkaConsumer is set - use it.
     if (!StringUtils.isBlank(autoOffsetReset)) {
-      if (autoOffsetReset.equals(KAFKA_OFFSET_EARLIEST) || autoOffsetReset.equals(KAFKA_OFFSET_LATEST)
-          || autoOffsetReset.equals(KAFKA_OFFSET_NONE)) {
+      if (autoOffsetReset.equals(kafkaOffsetEarliest) || autoOffsetReset.equals(kafkaOffsetLatest)
+          || autoOffsetReset.equals(kafkaOffsetNone)) {
         return autoOffsetReset;
       }
       // translate old kafka consumer values into new ones (SAMZA-1987 to remove it)
       String newAutoOffsetReset;
       switch (autoOffsetReset) {
         case "largest":
-          newAutoOffsetReset = KAFKA_OFFSET_LATEST;
-          LOG.warn("Using old (deprecated) value for kafka consumer config auto.offset.reset = {}. The right value should be {}", autoOffsetReset, KAFKA_OFFSET_LATEST);
+          newAutoOffsetReset = kafkaOffsetLatest;
+          LOG.warn("Using old (deprecated) value for kafka consumer config auto.offset.reset = {}. The right value should be {}", autoOffsetReset, kafkaOffsetLatest);
           break;
         case "smallest":
-          newAutoOffsetReset = KAFKA_OFFSET_EARLIEST;
-          LOG.warn("Using old (deprecated) value for kafka consumer config auto.offset.reset = {}. The right value should be {}", autoOffsetReset, KAFKA_OFFSET_EARLIEST);
+          newAutoOffsetReset = kafkaOffsetEarliest;
+          LOG.warn("Using old (deprecated) value for kafka consumer config auto.offset.reset = {}. The right value should be {}", autoOffsetReset, kafkaOffsetEarliest);
           break;
         default:
           throw new SamzaException("Using invalid value for kafka consumer config auto.offset.reset " + autoOffsetReset + ". See KafkaConsumer config for the correct values.");
@@ -207,14 +203,14 @@
     }
 
     // in case kafka consumer configs are not provided we should match them to Samza's ones.
-    String newAutoOffsetReset = KAFKA_OFFSET_LATEST;
+    String newAutoOffsetReset = kafkaOffsetLatest;
     if (!StringUtils.isBlank(samzaOffsetDefault)) {
       switch (samzaOffsetDefault) {
         case SystemConfig.SAMZA_SYSTEM_OFFSET_UPCOMING:
-          newAutoOffsetReset = KAFKA_OFFSET_LATEST;
+          newAutoOffsetReset = kafkaOffsetLatest;
           break;
         case SystemConfig.SAMZA_SYSTEM_OFFSET_OLDEST:
-          newAutoOffsetReset = KAFKA_OFFSET_EARLIEST;
+          newAutoOffsetReset = kafkaOffsetEarliest;
           break;
         default:
           throw new SamzaException("Using invalid value for samza default offset config " + autoOffsetReset + ". See samza config for the correct values");
diff --git a/samza-kafka/src/main/java/org/apache/samza/system/kafka/KafkaConsumerProxy.java b/samza-kafka/src/main/java/org/apache/samza/system/kafka/KafkaConsumerProxy.java
index 4ecfc6a..329073c 100644
--- a/samza-kafka/src/main/java/org/apache/samza/system/kafka/KafkaConsumerProxy.java
+++ b/samza-kafka/src/main/java/org/apache/samza/system/kafka/KafkaConsumerProxy.java
@@ -1,5 +1,4 @@
 /*
- *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -16,9 +15,7 @@
  * KIND, either express or implied.  See the License for the
  * specific language governing permissions and limitations
  * under the License.
- *
  */
-
 package org.apache.samza.system.kafka;
 
 import java.time.Instant;
@@ -124,12 +121,12 @@
 
     isRunning = false;
     try {
-      consumerPollThread.join(timeoutMs/2);
+      consumerPollThread.join(timeoutMs / 2);
       // join() may timeout
       // in this case we should interrupt it and wait again
       if (consumerPollThread.isAlive()) {
         consumerPollThread.interrupt();
-        consumerPollThread.join(timeoutMs/2);
+        consumerPollThread.join(timeoutMs / 2);
       }
     } catch (InterruptedException e) {
       LOG.warn("Join in KafkaConsumerProxy has failed", e);
@@ -172,7 +169,15 @@
 
   private void initializeLags() {
     // This is expensive, so only do it once at the beginning. After the first poll, we can rely on metrics for lag.
-    Map<TopicPartition, Long> endOffsets = kafkaConsumer.endOffsets(topicPartitionToSSP.keySet());
+
+    Map<TopicPartition, Long> endOffsets;
+    // Synchronize, in case the consumer is used in some other thread (metadata or something else)
+    synchronized (kafkaConsumer) {
+      endOffsets = kafkaConsumer.endOffsets(topicPartitionToSSP.keySet());
+    }
+    if (endOffsets == null) {
+      throw new SamzaException("Failed to fetch kafka consumer endoffsets for system " + systemName);
+    }
     endOffsets.forEach((tp, offset) -> {
       SystemStreamPartition ssp = topicPartitionToSSP.get(tp);
       long startingOffset = nextOffsets.get(ssp);
@@ -444,13 +449,11 @@
     }
   }
 
-   @Override
+  @Override
   public String toString() {
     return String.format("consumerProxy-%s-%s", systemName, clientId);
   }
 
-
-
   /**
    * Used to create an instance of {@link KafkaConsumerProxy}. This can be overridden in case an extension of
    * {@link KafkaConsumerProxy} needs to be used within kafka system components like {@link KafkaSystemConsumer}.
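Beyond formatting, the substantive change in KafkaConsumerProxy above wraps the endOffsets() lookup in synchronized (kafkaConsumer) and null-checks the result: KafkaConsumer is not safe for concurrent use, so a call made outside the poll thread has to share a lock with the other callers. A stripped-down illustration of that guard, with an assumed wrapper class, follows.

import java.util.Collection;
import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;

class LockedEndOffsets {
  private final Consumer<byte[], byte[]> consumer;

  LockedEndOffsets(Consumer<byte[], byte[]> consumer) {
    this.consumer = consumer;
  }

  // Serializes access on the consumer instance itself, matching the pattern in the patch.
  Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions) {
    synchronized (consumer) {
      return consumer.endOffsets(partitions);
    }
  }
}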
diff --git a/samza-kafka/src/main/java/org/apache/samza/system/kafka/KafkaConsumerProxyFactory.java b/samza-kafka/src/main/java/org/apache/samza/system/kafka/KafkaConsumerProxyFactory.java
index cc4bddc..903e511 100644
--- a/samza-kafka/src/main/java/org/apache/samza/system/kafka/KafkaConsumerProxyFactory.java
+++ b/samza-kafka/src/main/java/org/apache/samza/system/kafka/KafkaConsumerProxyFactory.java
@@ -1,5 +1,4 @@
 /*
- *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -16,7 +15,6 @@
  * KIND, either express or implied.  See the License for the
  * specific language governing permissions and limitations
  * under the License.
- *
  */
 package org.apache.samza.system.kafka;
 
diff --git a/samza-kafka/src/main/java/org/apache/samza/system/kafka/KafkaStreamSpec.java b/samza-kafka/src/main/java/org/apache/samza/system/kafka/KafkaStreamSpec.java
index d621308..3324c67 100644
--- a/samza-kafka/src/main/java/org/apache/samza/system/kafka/KafkaStreamSpec.java
+++ b/samza-kafka/src/main/java/org/apache/samza/system/kafka/KafkaStreamSpec.java
@@ -97,18 +97,18 @@
    */
   public static KafkaStreamSpec fromSpec(StreamSpec originalSpec) {
     if (originalSpec instanceof KafkaStreamSpec) {
-      return ((KafkaStreamSpec) originalSpec);
+      return (KafkaStreamSpec) originalSpec;
     }
 
-    int replicationFactor = Integer.parseInt(originalSpec.getOrDefault( KafkaConfig.TOPIC_REPLICATION_FACTOR(),
-                                                                        KafkaConfig.TOPIC_DEFAULT_REPLICATION_FACTOR()));
+    int replicationFactor = Integer.parseInt(originalSpec.getOrDefault(KafkaConfig.TOPIC_REPLICATION_FACTOR(),
+        KafkaConfig.TOPIC_DEFAULT_REPLICATION_FACTOR()));
 
-    return new KafkaStreamSpec( originalSpec.getId(),
-                                originalSpec.getPhysicalName(),
-                                originalSpec.getSystemName(),
-                                originalSpec.getPartitionCount(),
-                                replicationFactor,
-                                mapToProperties(filterUnsupportedProperties(originalSpec.getConfig())));
+    return new KafkaStreamSpec(originalSpec.getId(),
+                               originalSpec.getPhysicalName(),
+                               originalSpec.getSystemName(),
+                               originalSpec.getPartitionCount(),
+                               replicationFactor,
+                               mapToProperties(filterUnsupportedProperties(originalSpec.getConfig())));
   }
 
   /**
diff --git a/samza-kafka/src/main/java/org/apache/samza/system/kafka/KafkaSystemAdmin.java b/samza-kafka/src/main/java/org/apache/samza/system/kafka/KafkaSystemAdmin.java
index 91d4b11..a60752c 100644
--- a/samza-kafka/src/main/java/org/apache/samza/system/kafka/KafkaSystemAdmin.java
+++ b/samza-kafka/src/main/java/org/apache/samza/system/kafka/KafkaSystemAdmin.java
@@ -213,49 +213,49 @@
   public Map<String, SystemStreamMetadata> getSystemStreamPartitionCounts(Set<String> streamNames, long cacheTTL) {
     // This optimization omits actual metadata for performance. Instead, we inject a dummy for all partitions.
     final SystemStreamMetadata.SystemStreamPartitionMetadata dummySspm =
-        new SystemStreamMetadata.SystemStreamPartitionMetadata(null, null, null) {
-          String msg =
-              "getSystemStreamPartitionCounts does not populate SystemStreaMetadata info. Only number of partitions";
+      new SystemStreamMetadata.SystemStreamPartitionMetadata(null, null, null) {
+        String msg =
+            "getSystemStreamPartitionCounts does not populate SystemStreaMetadata info. Only number of partitions";
 
-          @Override
-          public String getOldestOffset() {
-            throw new NotImplementedException(msg);
-          }
+        @Override
+        public String getOldestOffset() {
+          throw new NotImplementedException(msg);
+        }
 
-          @Override
-          public String getNewestOffset() {
-            throw new NotImplementedException(msg);
-          }
+        @Override
+        public String getNewestOffset() {
+          throw new NotImplementedException(msg);
+        }
 
-          @Override
-          public String getUpcomingOffset() {
-            throw new NotImplementedException(msg);
-          }
-        };
+        @Override
+        public String getUpcomingOffset() {
+          throw new NotImplementedException(msg);
+        }
+      };
 
     ExponentialSleepStrategy strategy = new ExponentialSleepStrategy(DEFAULT_EXPONENTIAL_SLEEP_BACK_OFF_MULTIPLIER,
         DEFAULT_EXPONENTIAL_SLEEP_INITIAL_DELAY_MS, DEFAULT_EXPONENTIAL_SLEEP_MAX_DELAY_MS);
 
     Function1<ExponentialSleepStrategy.RetryLoop, Map<String, SystemStreamMetadata>> fetchMetadataOperation =
-        new AbstractFunction1<ExponentialSleepStrategy.RetryLoop, Map<String, SystemStreamMetadata>>() {
-          @Override
-          public Map<String, SystemStreamMetadata> apply(ExponentialSleepStrategy.RetryLoop loop) {
-            Map<String, SystemStreamMetadata> allMetadata = new HashMap<>();
+      new AbstractFunction1<ExponentialSleepStrategy.RetryLoop, Map<String, SystemStreamMetadata>>() {
+        @Override
+        public Map<String, SystemStreamMetadata> apply(ExponentialSleepStrategy.RetryLoop loop) {
+          Map<String, SystemStreamMetadata> allMetadata = new HashMap<>();
 
-            streamNames.forEach(streamName -> {
-              Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> partitionMetadata = new HashMap<>();
+          streamNames.forEach(streamName -> {
+            Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> partitionMetadata = new HashMap<>();
 
-              List<PartitionInfo> partitionInfos = threadSafeKafkaConsumer.execute(consumer -> consumer.partitionsFor(streamName));
-              LOG.debug("Stream {} has partitions {}", streamName, partitionInfos);
-              partitionInfos.forEach(
-                  partitionInfo -> partitionMetadata.put(new Partition(partitionInfo.partition()), dummySspm));
-              allMetadata.put(streamName, new SystemStreamMetadata(streamName, partitionMetadata));
-            });
+            List<PartitionInfo> partitionInfos = threadSafeKafkaConsumer.execute(consumer -> consumer.partitionsFor(streamName));
+            LOG.debug("Stream {} has partitions {}", streamName, partitionInfos);
+            partitionInfos.forEach(
+              partitionInfo -> partitionMetadata.put(new Partition(partitionInfo.partition()), dummySspm));
+            allMetadata.put(streamName, new SystemStreamMetadata(streamName, partitionMetadata));
+          });
 
-            loop.done();
-            return allMetadata;
-          }
-        };
+          loop.done();
+          return allMetadata;
+        }
+      };
 
     Map<String, SystemStreamMetadata> result = strategy.run(fetchMetadataOperation,
         new AbstractFunction2<Exception, ExponentialSleepStrategy.RetryLoop, BoxedUnit>() {
@@ -323,51 +323,51 @@
 
     Function1<ExponentialSleepStrategy.RetryLoop, Map<SystemStreamPartition,
         SystemStreamMetadata.SystemStreamPartitionMetadata>> fetchTopicPartitionMetadataOperation =
-        new AbstractFunction1<ExponentialSleepStrategy.RetryLoop, Map<SystemStreamPartition,
-            SystemStreamMetadata.SystemStreamPartitionMetadata>>() {
+      new AbstractFunction1<ExponentialSleepStrategy.RetryLoop, Map<SystemStreamPartition,
+          SystemStreamMetadata.SystemStreamPartitionMetadata>>() {
 
-          @Override
-          public Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> apply(
-              ExponentialSleepStrategy.RetryLoop loop) {
-            OffsetsMaps topicPartitionsMetadata = fetchTopicPartitionsMetadata(topicPartitions);
+        @Override
+        public Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> apply(
+            ExponentialSleepStrategy.RetryLoop loop) {
+          OffsetsMaps topicPartitionsMetadata = fetchTopicPartitionsMetadata(topicPartitions);
 
-            Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> sspToSSPMetadata = new HashMap<>();
-            for (SystemStreamPartition ssp : ssps) {
-              String oldestOffset = topicPartitionsMetadata.getOldestOffsets().get(ssp);
-              String newestOffset = topicPartitionsMetadata.getNewestOffsets().get(ssp);
-              String upcomingOffset = topicPartitionsMetadata.getUpcomingOffsets().get(ssp);
+          Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> sspToSSPMetadata = new HashMap<>();
+          for (SystemStreamPartition ssp : ssps) {
+            String oldestOffset = topicPartitionsMetadata.getOldestOffsets().get(ssp);
+            String newestOffset = topicPartitionsMetadata.getNewestOffsets().get(ssp);
+            String upcomingOffset = topicPartitionsMetadata.getUpcomingOffsets().get(ssp);
 
-              sspToSSPMetadata.put(ssp,
-                  new SystemStreamMetadata.SystemStreamPartitionMetadata(oldestOffset, newestOffset, upcomingOffset));
-            }
-            loop.done();
-            return sspToSSPMetadata;
+            sspToSSPMetadata.put(ssp,
+                new SystemStreamMetadata.SystemStreamPartitionMetadata(oldestOffset, newestOffset, upcomingOffset));
           }
-        };
+          loop.done();
+          return sspToSSPMetadata;
+        }
+      };
 
     Function2<Exception, ExponentialSleepStrategy.RetryLoop, BoxedUnit> onExceptionRetryOperation =
-        new AbstractFunction2<Exception, ExponentialSleepStrategy.RetryLoop, BoxedUnit>() {
-          @Override
-          public BoxedUnit apply(Exception exception, ExponentialSleepStrategy.RetryLoop loop) {
-            if (loop.sleepCount() < MAX_RETRIES_ON_EXCEPTION) {
-              LOG.warn(
-                  String.format("Fetching SSP metadata for: %s threw an exception. Retrying.", ssps), exception);
-            } else {
-              LOG.error(String.format("Fetching SSP metadata for: %s threw an exception.", ssps), exception);
-              loop.done();
-              throw new SamzaException(exception);
-            }
-            return null;
+      new AbstractFunction2<Exception, ExponentialSleepStrategy.RetryLoop, BoxedUnit>() {
+        @Override
+        public BoxedUnit apply(Exception exception, ExponentialSleepStrategy.RetryLoop loop) {
+          if (loop.sleepCount() < MAX_RETRIES_ON_EXCEPTION) {
+            LOG.warn(
+                String.format("Fetching SSP metadata for: %s threw an exception. Retrying.", ssps), exception);
+          } else {
+            LOG.error(String.format("Fetching SSP metadata for: %s threw an exception.", ssps), exception);
+            loop.done();
+            throw new SamzaException(exception);
           }
-        };
+          return null;
+        }
+      };
 
     Function0<Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata>> fallbackOperation =
-        new AbstractFunction0<Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata>>() {
-          @Override
-          public Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> apply() {
-            throw new SamzaException("Failed to get SSP metadata");
-          }
-        };
+      new AbstractFunction0<Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata>>() {
+        @Override
+        public Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> apply() {
+          throw new SamzaException("Failed to get SSP metadata");
+        }
+      };
 
     return retryBackoff.run(fetchTopicPartitionMetadataOperation, onExceptionRetryOperation).getOrElse(fallbackOperation);
   }
@@ -389,41 +389,41 @@
     LOG.info("Fetching system stream metadata for {} from system {}", streamNames, systemName);
 
     Function1<ExponentialSleepStrategy.RetryLoop, Map<String, SystemStreamMetadata>> fetchMetadataOperation =
-        new AbstractFunction1<ExponentialSleepStrategy.RetryLoop, Map<String, SystemStreamMetadata>>() {
-          @Override
-          public Map<String, SystemStreamMetadata> apply(ExponentialSleepStrategy.RetryLoop loop) {
-            Map<String, SystemStreamMetadata> metadata = fetchSystemStreamMetadata(streamNames);
-            loop.done();
-            return metadata;
-          }
-        };
+      new AbstractFunction1<ExponentialSleepStrategy.RetryLoop, Map<String, SystemStreamMetadata>>() {
+        @Override
+        public Map<String, SystemStreamMetadata> apply(ExponentialSleepStrategy.RetryLoop loop) {
+          Map<String, SystemStreamMetadata> metadata = fetchSystemStreamMetadata(streamNames);
+          loop.done();
+          return metadata;
+        }
+      };
 
     Function2<Exception, ExponentialSleepStrategy.RetryLoop, BoxedUnit> onExceptionRetryOperation =
-        new AbstractFunction2<Exception, ExponentialSleepStrategy.RetryLoop, BoxedUnit>() {
-          @Override
-          public BoxedUnit apply(Exception exception, ExponentialSleepStrategy.RetryLoop loop) {
-            if (loop.sleepCount() < MAX_RETRIES_ON_EXCEPTION) {
-              LOG.warn(
-                  String.format("Fetching system stream metadata for: %s threw an exception. Retrying.", streamNames),
-                  exception);
-            } else {
-              LOG.error(String.format("Fetching system stream metadata for: %s threw an exception.", streamNames),
-                  exception);
-              loop.done();
-              throw new SamzaException(exception);
-            }
-
-            return null;
+      new AbstractFunction2<Exception, ExponentialSleepStrategy.RetryLoop, BoxedUnit>() {
+        @Override
+        public BoxedUnit apply(Exception exception, ExponentialSleepStrategy.RetryLoop loop) {
+          if (loop.sleepCount() < MAX_RETRIES_ON_EXCEPTION) {
+            LOG.warn(
+                String.format("Fetching system stream metadata for: %s threw an exception. Retrying.", streamNames),
+                exception);
+          } else {
+            LOG.error(String.format("Fetching system stream metadata for: %s threw an exception.", streamNames),
+                exception);
+            loop.done();
+            throw new SamzaException(exception);
           }
-        };
+
+          return null;
+        }
+      };
 
     Function0<Map<String, SystemStreamMetadata>> fallbackOperation =
-        new AbstractFunction0<Map<String, SystemStreamMetadata>>() {
-          @Override
-          public Map<String, SystemStreamMetadata> apply() {
-            throw new SamzaException("Failed to get system stream metadata");
-          }
-        };
+      new AbstractFunction0<Map<String, SystemStreamMetadata>>() {
+        @Override
+        public Map<String, SystemStreamMetadata> apply() {
+          throw new SamzaException("Failed to get system stream metadata");
+        }
+      };
 
     return retryBackoff.run(fetchMetadataOperation, onExceptionRetryOperation).getOrElse(fallbackOperation);
   }
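
The two metadata fetchers above share one retry shape: a Function1 operation that performs a single attempt and marks the loop done on success, a Function2 handler that allows up to MAX_RETRIES_ON_EXCEPTION backoff sleeps before giving up with a SamzaException, and a Function0 fallback that only runs when no result was produced. The following is a minimal hedged sketch of that shape, not part of the patch; fetchOnce() is a hypothetical single attempt, and the types and imports mirror the ones already used in this file.

    ExponentialSleepStrategy retryBackoff = new ExponentialSleepStrategy(2.0, 200, 10000);

    Function1<ExponentialSleepStrategy.RetryLoop, String> fetchOperation =
        new AbstractFunction1<ExponentialSleepStrategy.RetryLoop, String>() {
          @Override
          public String apply(ExponentialSleepStrategy.RetryLoop loop) {
            String result = fetchOnce(); // hypothetical single attempt
            loop.done();                 // success: stop retrying
            return result;
          }
        };

    Function2<Exception, ExponentialSleepStrategy.RetryLoop, BoxedUnit> onException =
        new AbstractFunction2<Exception, ExponentialSleepStrategy.RetryLoop, BoxedUnit>() {
          @Override
          public BoxedUnit apply(Exception exception, ExponentialSleepStrategy.RetryLoop loop) {
            if (loop.sleepCount() >= MAX_RETRIES_ON_EXCEPTION) {
              loop.done();               // retry budget exhausted: stop and fail
              throw new SamzaException(exception);
            }
            return null;                 // otherwise sleep with backoff and retry
          }
        };

    Function0<String> fallback = new AbstractFunction0<String>() {
      @Override
      public String apply() {
        throw new SamzaException("No result produced"); // only reached when run(...) yields nothing
      }
    };

    String result = retryBackoff.run(fetchOperation, onException).getOrElse(fallback);
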
@@ -486,16 +486,16 @@
 
     topics.forEach(topic -> {
       OffsetsMaps offsetsForTopic = threadSafeKafkaConsumer.execute(consumer -> {
-         List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
-         if (partitionInfos == null) {
-           String msg = String.format("Partition info not(yet?) available for system %s topic %s", systemName, topic);
-           throw new SamzaException(msg);
-         }
-         List<TopicPartition> topicPartitions = partitionInfos.stream()
-          .map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition()))
-          .collect(Collectors.toList());
-         return fetchTopicPartitionsMetadata(topicPartitions);
-       });
+        List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
+        if (partitionInfos == null) {
+          String msg = String.format("Partition info not(yet?) available for system %s topic %s", systemName, topic);
+          throw new SamzaException(msg);
+        }
+        List<TopicPartition> topicPartitions = partitionInfos.stream()
+            .map(partitionInfo -> new TopicPartition(partitionInfo.topic(), partitionInfo.partition()))
+            .collect(Collectors.toList());
+        return fetchTopicPartitionsMetadata(topicPartitions);
+      });
       allOldestOffsets.putAll(offsetsForTopic.getOldestOffsets());
       allNewestOffsets.putAll(offsetsForTopic.getNewestOffsets());
       allUpcomingOffsets.putAll(offsetsForTopic.getUpcomingOffsets());
@@ -516,7 +516,7 @@
   @Override
   public boolean createStream(StreamSpec streamSpec) {
     LOG.info("Creating Kafka topic: {} on system: {}", streamSpec.getPhysicalName(), streamSpec.getSystemName());
-    final String REPL_FACTOR = "replication.factor";
+    final String replFactor = "replication.factor";
 
     KafkaStreamSpec kafkaStreamSpec = toKafkaSpec(streamSpec);
     String topicName = kafkaStreamSpec.getPhysicalName();
@@ -527,11 +527,11 @@
     // specify the configs
     Map<String, String> streamConfig = new HashMap<>(kafkaStreamSpec.getConfig());
     // HACK - replication.factor is invalid config for AdminClient.createTopics
-    if (streamConfig.containsKey(REPL_FACTOR)) {
-      String repl = streamConfig.get(REPL_FACTOR);
+    if (streamConfig.containsKey(replFactor)) {
+      String repl = streamConfig.get(replFactor);
       LOG.warn("Configuration {}={} for topic={} is invalid. Using kSpec repl factor {}",
-          REPL_FACTOR, repl, kafkaStreamSpec.getPhysicalName(), kafkaStreamSpec.getReplicationFactor());
-      streamConfig.remove(REPL_FACTOR);
+          replFactor, repl, kafkaStreamSpec.getPhysicalName(), kafkaStreamSpec.getReplicationFactor());
+      streamConfig.remove(replFactor);
     }
     newTopic.configs(new MapConfig(streamConfig));
     CreateTopicsResult result = adminClient.createTopics(ImmutableSet.of(newTopic));
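
As the hunk above notes, replication.factor has to be stripped because AdminClient.createTopics does not accept it as a topic-level config; the replication factor travels as a NewTopic constructor argument instead. A small hedged illustration, not part of the patch, follows; the topic name, counts, and config values are placeholders, adminClient is the field of the surrounding class, and ImmutableMap/ImmutableSet come from Guava as elsewhere in this module.

    NewTopic exampleTopic = new NewTopic("example-topic", 8, (short) 3); // partitions, replication factor
    exampleTopic.configs(ImmutableMap.of("segment.bytes", "536870912")); // only valid per-topic configs here
    adminClient.createTopics(ImmutableSet.of(exampleTopic));
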
@@ -653,8 +653,8 @@
       Map<TopicPartition, RecordsToDelete> recordsToDelete = offsets.entrySet()
           .stream()
           .collect(Collectors.toMap(entry ->
-              new TopicPartition(entry.getKey().getStream(), entry.getKey().getPartition().getPartitionId()),
-              entry -> RecordsToDelete.beforeOffset(Long.parseLong(entry.getValue()) + 1)));
+            new TopicPartition(entry.getKey().getStream(), entry.getKey().getPartition().getPartitionId()),
+            entry -> RecordsToDelete.beforeOffset(Long.parseLong(entry.getValue()) + 1)));
 
       adminClient.deleteRecords(recordsToDelete).all().whenComplete((ignored, exception) -> {
         if (exception != null) {
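
The "+ 1" in the hunk above follows from the AdminClient contract: RecordsToDelete.beforeOffset(x) deletes records with offsets strictly less than x, so passing the last checkpointed offset plus one also purges that record itself. A hedged sketch with a hypothetical partition and offset; adminClient and LOG refer to the fields of the surrounding class.

    TopicPartition tp = new TopicPartition("example-topic", 0);
    long lastProcessedOffset = 41L; // hypothetical offset read back from a checkpoint
    Map<TopicPartition, RecordsToDelete> toDelete =
        ImmutableMap.of(tp, RecordsToDelete.beforeOffset(lastProcessedOffset + 1));
    adminClient.deleteRecords(toDelete).all().whenComplete((ignored, exception) -> {
      if (exception != null) {
        LOG.error("Failed to delete records before offset {}", lastProcessedOffset + 1, exception);
      }
    });
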
diff --git a/samza-kafka/src/main/java/org/apache/samza/system/kafka/KafkaSystemConsumer.java b/samza-kafka/src/main/java/org/apache/samza/system/kafka/KafkaSystemConsumer.java
index 3974e02..27bd638 100644
--- a/samza-kafka/src/main/java/org/apache/samza/system/kafka/KafkaSystemConsumer.java
+++ b/samza-kafka/src/main/java/org/apache/samza/system/kafka/KafkaSystemConsumer.java
@@ -1,6 +1,4 @@
-
 /*
- *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -17,9 +15,7 @@
  * KIND, either express or implied.  See the License for the
  * specific language governing permissions and limitations
  * under the License.
- *
  */
-
 package org.apache.samza.system.kafka;
 
 import java.util.HashMap;
@@ -158,7 +154,7 @@
   void startConsumer() {
     // set the offset for each TopicPartition
     if (topicPartitionsToOffset.size() <= 0) {
-      LOG.error ("{}: Consumer is not subscribed to any SSPs", this);
+      LOG.error("{}: Consumer is not subscribed to any SSPs", this);
     }
 
     topicPartitionsToOffset.forEach((topicPartition, startingOffsetString) -> {
diff --git a/samza-kafka/src/main/java/org/apache/samza/system/kafka/descriptors/KafkaSystemDescriptor.java b/samza-kafka/src/main/java/org/apache/samza/system/kafka/descriptors/KafkaSystemDescriptor.java
index 8c4d48b..bf42320 100644
--- a/samza-kafka/src/main/java/org/apache/samza/system/kafka/descriptors/KafkaSystemDescriptor.java
+++ b/samza-kafka/src/main/java/org/apache/samza/system/kafka/descriptors/KafkaSystemDescriptor.java
@@ -228,7 +228,7 @@
   @Override
   public Map<String, String> toConfig() {
     Map<String, String> configs = new HashMap<>(super.toConfig());
-    if(!consumerZkConnect.isEmpty()) {
+    if (!consumerZkConnect.isEmpty()) {
       configs.put(String.format(CONSUMER_ZK_CONNECT_CONFIG_KEY, getSystemName()), String.join(",", consumerZkConnect));
     }
     consumerAutoOffsetResetOptional.ifPresent(consumerAutoOffsetReset ->
diff --git a/samza-kafka/src/test/java/org/apache/samza/checkpoint/kafka/TestKafkaCheckpointManagerJava.java b/samza-kafka/src/test/java/org/apache/samza/checkpoint/kafka/TestKafkaCheckpointManagerJava.java
index 4e52ced..1280e79 100644
--- a/samza-kafka/src/test/java/org/apache/samza/checkpoint/kafka/TestKafkaCheckpointManagerJava.java
+++ b/samza-kafka/src/test/java/org/apache/samza/checkpoint/kafka/TestKafkaCheckpointManagerJava.java
@@ -196,7 +196,7 @@
 
     // mock out a consumer that returns ten checkpoint IMEs for the same ssp
     List<List<IncomingMessageEnvelope>> pollOutputs = new ArrayList<>();
-    for(int offset = oldestOffset; offset <= newestOffset; offset++) {
+    for (int offset = oldestOffset; offset <= newestOffset; offset++) {
       pollOutputs.add(ImmutableList.of(newCheckpointEnvelope(TASK1, ssp, Integer.toString(offset))));
     }
 
diff --git a/samza-kafka/src/test/java/org/apache/samza/system/kafka/MockKafkaProducer.java b/samza-kafka/src/test/java/org/apache/samza/system/kafka/MockKafkaProducer.java
index 95676b7..a28b23e 100644
--- a/samza-kafka/src/test/java/org/apache/samza/system/kafka/MockKafkaProducer.java
+++ b/samza-kafka/src/test/java/org/apache/samza/system/kafka/MockKafkaProducer.java
@@ -41,14 +41,13 @@
 import org.apache.kafka.common.MetricName;
 import org.apache.kafka.common.PartitionInfo;
 import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.record.Record;
 import org.apache.kafka.common.record.RecordBatch;
 import org.apache.kafka.test.TestUtils;
 
 public class MockKafkaProducer implements Producer<byte[], byte[]> {
 
-  private Cluster _cluster;
-  private List<FutureTask<RecordMetadata>> _callbacksList = new ArrayList<FutureTask<RecordMetadata>>();
+  private Cluster cluster;
+  private List<FutureTask<RecordMetadata>> callbacksList = new ArrayList<FutureTask<RecordMetadata>>();
   private boolean shouldBuffer = false;
   private boolean errorNext = false;
   private boolean errorInCallback = true;
@@ -70,7 +69,7 @@
    *  - "Offset" in RecordMetadata is not guranteed to be correct
    */
   public MockKafkaProducer(int numNodes, String topicName, int numPartitions) {
-    this._cluster = TestUtils.clusterWith(numNodes, topicName, numPartitions);
+    this.cluster = TestUtils.clusterWith(numNodes, topicName, numPartitions);
   }
 
   public void setShouldBuffer(boolean shouldBuffer) {
@@ -110,7 +109,7 @@
     if (errorNext) {
       if (!errorInCallback) {
         this.errorNext = false;
-        throw (RuntimeException)exception;
+        throw (RuntimeException) exception;
       }
       if (shouldBuffer) {
         FutureTask<RecordMetadata> f = new FutureTask<RecordMetadata>(new Callable<RecordMetadata>() {
@@ -121,7 +120,7 @@
             return getRecordMetadata(record);
           }
         });
-        _callbacksList.add(f);
+        callbacksList.add(f);
         this.errorNext = false;
         return f;
       } else {
@@ -141,7 +140,7 @@
             return metadata;
           }
         });
-        _callbacksList.add(f);
+        callbacksList.add(f);
         return f;
       } else {
         int offset = msgsSent.incrementAndGet();
@@ -154,7 +153,7 @@
 
   @Override
   public List<PartitionInfo> partitionsFor(String topic) {
-    return this._cluster.partitionsForTopic(topic);
+    return this.cluster.partitionsForTopic(topic);
   }
 
   @Override
@@ -187,7 +186,7 @@
     return openCount;
   }
 
-  public synchronized void flush () {
+  public synchronized void flush() {
     new FlushRunnable(0).run();
   }
 
@@ -248,11 +247,11 @@
   private static class FutureSuccess implements Future<RecordMetadata> {
 
     private ProducerRecord record;
-    private final RecordMetadata _metadata;
+    private final RecordMetadata metadata;
 
     public FutureSuccess(ProducerRecord record, int offset) {
       this.record = record;
-      this._metadata = new RecordMetadata(new TopicPartition(record.topic(), record.partition() == null ? 0 : record.partition()), 0, offset, RecordBatch.NO_TIMESTAMP, -1L, -1, -1);
+      this.metadata = new RecordMetadata(new TopicPartition(record.topic(), record.partition() == null ? 0 : record.partition()), 0, offset, RecordBatch.NO_TIMESTAMP, -1L, -1, -1);
     }
 
     @Override
@@ -262,12 +261,12 @@
 
     @Override
     public RecordMetadata get() throws ExecutionException {
-      return this._metadata;
+      return this.metadata;
     }
 
     @Override
     public RecordMetadata get(long timeout, TimeUnit unit) throws ExecutionException {
-      return this._metadata;
+      return this.metadata;
     }
 
     @Override
@@ -282,21 +281,21 @@
   }
 
   private class FlushRunnable implements Runnable {
-    private final int _sleepTime;
+    private final int sleepTime;
 
     public FlushRunnable(int sleepTime) {
-      _sleepTime = sleepTime;
+      this.sleepTime = sleepTime;
     }
 
     public void run() {
-      FutureTask[] callbackArray = new FutureTask[_callbacksList.size()];
-      AtomicReferenceArray<FutureTask> _bufferList =
-          new AtomicReferenceArray<FutureTask>(_callbacksList.toArray(callbackArray));
+      FutureTask[] callbackArray = new FutureTask[callbacksList.size()];
+      AtomicReferenceArray<FutureTask> bufferList =
+          new AtomicReferenceArray<FutureTask>(callbacksList.toArray(callbackArray));
       ExecutorService executor = Executors.newFixedThreadPool(10);
       try {
-        for (int i = 0; i < _bufferList.length(); i++) {
-          Thread.sleep(_sleepTime);
-          FutureTask f = _bufferList.get(i);
+        for (int i = 0; i < bufferList.length(); i++) {
+          Thread.sleep(sleepTime);
+          FutureTask f = bufferList.get(i);
           if (!f.isDone()) {
             executor.submit(f).get();
           }
diff --git a/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaCheckpointManagerFactory.java b/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaCheckpointManagerFactory.java
index 1846ea8..2494fcc 100644
--- a/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaCheckpointManagerFactory.java
+++ b/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaCheckpointManagerFactory.java
@@ -19,7 +19,6 @@
 
 package org.apache.samza.system.kafka;
 
-import org.apache.samza.checkpoint.kafka.KafkaCheckpointManagerFactory;
 import org.apache.samza.config.ApplicationConfig;
 import org.apache.samza.config.KafkaConfig;
 import org.apache.samza.config.MapConfig;
diff --git a/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaStreamSpec.java b/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaStreamSpec.java
index 14d2fe6..8a48283 100644
--- a/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaStreamSpec.java
+++ b/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaStreamSpec.java
@@ -22,19 +22,19 @@
 import java.util.Map;
 import java.util.Properties;
 import org.apache.samza.system.StreamSpec;
-import org.apache.samza.util.TestStreamUtil;
 import org.junit.Test;
 
 import static org.junit.Assert.*;
 
 /**
- * See also the general StreamSpec tests in {@link TestStreamUtil}
+ * See also the general StreamSpec tests in {@link org.apache.samza.util.TestStreamUtil}
  */
 public class TestKafkaStreamSpec {
 
   @Test
   public void testUnsupportedConfigStrippedFromProperties() {
-    StreamSpec original = new StreamSpec("dummyId","dummyPhysicalName", "dummySystemName", ImmutableMap.of("segment.bytes", "4", "replication.factor", "7"));
+    StreamSpec original = new StreamSpec("dummyId", "dummyPhysicalName", "dummySystemName",
+        ImmutableMap.of("segment.bytes", "4", "replication.factor", "7"));
 
     // First verify the original
     assertEquals("7", original.get("replication.factor"));
@@ -61,6 +61,6 @@
 
   @Test(expected = IllegalArgumentException.class)
   public void testInvalidPartitionCount() {
-    new KafkaStreamSpec("dummyId","dummyPhysicalName", "dummySystemName", 0);
+    new KafkaStreamSpec("dummyId", "dummyPhysicalName", "dummySystemName", 0);
   }
 }
diff --git a/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaSystemAdminJava.java b/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaSystemAdminJava.java
index 82d635f..cb8f34d 100644
--- a/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaSystemAdminJava.java
+++ b/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaSystemAdminJava.java
@@ -22,12 +22,10 @@
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
-import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.kafka.clients.admin.DescribeConfigsResult;
 import org.apache.kafka.clients.admin.DescribeTopicsResult;
 import org.apache.kafka.clients.admin.TopicDescription;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
@@ -93,7 +91,7 @@
 
     configResourceConfigMap.values().forEach(configEntry -> {
       configEntry.entries().forEach(config -> {
-          kafkaTopicConfig.put(config.name(), config.value());
+        kafkaTopicConfig.put(config.name(), config.value());
       });
     });
 
@@ -229,7 +227,7 @@
 
   @Test
   public void testCreateCoordinatorStreamWithSpecialCharsInTopicName() {
-    final String STREAM = "test.coordinator_test.Stream";
+    final String stream = "test.coordinator_test.Stream";
 
     Map<String, String> map = new HashMap<>();
     map.put("job.coordinator.segment.bytes", "123");
@@ -239,14 +237,14 @@
         String.valueOf(coordReplicatonFactor));
 
     KafkaSystemAdmin admin = Mockito.spy(createSystemAdmin(SYSTEM, map));
-    StreamSpec spec = StreamSpec.createCoordinatorStreamSpec(STREAM, SYSTEM);
+    StreamSpec spec = StreamSpec.createCoordinatorStreamSpec(stream, SYSTEM);
 
     Mockito.doAnswer(invocationOnMock -> {
       StreamSpec internalSpec = (StreamSpec) invocationOnMock.callRealMethod();
       assertTrue(internalSpec instanceof KafkaStreamSpec);  // KafkaStreamSpec is used to carry replication factor
       assertTrue(internalSpec.isCoordinatorStream());
       assertEquals(SYSTEM, internalSpec.getSystemName());
-      assertEquals(STREAM, internalSpec.getPhysicalName());
+      assertEquals(stream, internalSpec.getPhysicalName());
       assertEquals(1, internalSpec.getPartitionCount());
       Assert.assertEquals(coordReplicatonFactor, ((KafkaStreamSpec) internalSpec).getReplicationFactor());
       Assert.assertEquals("123", ((KafkaStreamSpec) internalSpec).getProperties().getProperty("segment.bytes"));
@@ -272,16 +270,16 @@
   }
 
   public void testCreateChangelogStreamHelp(final String topic) {
-    final int PARTITIONS = 12;
-    final int REP_FACTOR = 2;
+    final int partitions = 12;
+    final int repFactor = 2;
 
     Map<String, String> map = new HashMap<>();
     map.put(JobConfig.JOB_DEFAULT_SYSTEM, SYSTEM);
     map.put(String.format("stores.%s.changelog", "fakeStore"), topic);
-    map.put(String.format("stores.%s.changelog.replication.factor", "fakeStore"), String.valueOf(REP_FACTOR));
+    map.put(String.format("stores.%s.changelog.replication.factor", "fakeStore"), String.valueOf(repFactor));
     map.put(String.format("stores.%s.changelog.kafka.segment.bytes", "fakeStore"), "139");
     KafkaSystemAdmin admin = Mockito.spy(createSystemAdmin(SYSTEM, map));
-    StreamSpec spec = StreamSpec.createChangeLogStreamSpec(topic, SYSTEM, PARTITIONS);
+    StreamSpec spec = StreamSpec.createChangeLogStreamSpec(topic, SYSTEM, partitions);
 
     Mockito.doAnswer(invocationOnMock -> {
       StreamSpec internalSpec = (StreamSpec) invocationOnMock.callRealMethod();
@@ -289,8 +287,8 @@
       assertTrue(internalSpec.isChangeLogStream());
       assertEquals(SYSTEM, internalSpec.getSystemName());
       assertEquals(topic, internalSpec.getPhysicalName());
-      assertEquals(REP_FACTOR, ((KafkaStreamSpec) internalSpec).getReplicationFactor());
-      assertEquals(PARTITIONS, internalSpec.getPartitionCount());
+      assertEquals(repFactor, ((KafkaStreamSpec) internalSpec).getReplicationFactor());
+      assertEquals(partitions, internalSpec.getPartitionCount());
       assertEquals("139", ((KafkaStreamSpec) internalSpec).getProperties().getProperty("segment.bytes"));
       assertEquals("compact", ((KafkaStreamSpec) internalSpec).getProperties().getProperty("cleanup.policy"));
 
@@ -366,7 +364,7 @@
   }
 
   @Test
-  public void testShouldAssembleMetadata () {
+  public void testShouldAssembleMetadata() {
     Map<SystemStreamPartition, String> oldestOffsets = new ImmutableMap.Builder<SystemStreamPartition, String>()
         .put(new SystemStreamPartition(SYSTEM, "stream1", new Partition(0)), "o1")
         .put(new SystemStreamPartition(SYSTEM, "stream2", new Partition(0)), "o2")
diff --git a/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaSystemAdminWithMock.java b/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaSystemAdminWithMock.java
index 25cab8c..cd1e707 100644
--- a/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaSystemAdminWithMock.java
+++ b/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaSystemAdminWithMock.java
@@ -336,7 +336,7 @@
   }
 
   @Test(expected = SamzaException.class)
-  public void testGetSSPMetadataShouldTerminateAfterFiniteRetriesOnException() throws Exception{
+  public void testGetSSPMetadataShouldTerminateAfterFiniteRetriesOnException() throws Exception {
     SystemStreamPartition oneSSP = new SystemStreamPartition(TEST_SYSTEM, VALID_TOPIC, new Partition(0));
     SystemStreamPartition otherSSP = new SystemStreamPartition(TEST_SYSTEM, "otherTopic", new Partition(1));
 
diff --git a/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaSystemConsumer.java b/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaSystemConsumer.java
index f5b1e8e..dd20248 100644
--- a/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaSystemConsumer.java
+++ b/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaSystemConsumer.java
@@ -1,5 +1,4 @@
 /*
- *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -16,9 +15,7 @@
  * KIND, either express or implied.  See the License for the
  * specific language governing permissions and limitations
  * under the License.
- *
  */
-
 package org.apache.samza.system.kafka;
 
 import java.util.HashMap;
@@ -173,7 +170,7 @@
     int partitionsNum = 2;
     int ime0Size = Integer.valueOf(FETCH_THRESHOLD_MSGS) / partitionsNum; // fake size, up to the limit
     int ime1Size = Integer.valueOf(FETCH_THRESHOLD_MSGS) / partitionsNum - 100; // fake size, below the limit
-    int ime11Size = 20;// event with the second message still below the size limit
+    int ime11Size = 20; // even with the second message still below the size limit
     ByteArraySerializer bytesSerde = new ByteArraySerializer();
     IncomingMessageEnvelope ime0 = new IncomingMessageEnvelope(ssp0, "0", bytesSerde.serialize("", "key0".getBytes()),
         bytesSerde.serialize("", "value0".getBytes()), ime0Size);
diff --git a/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaSystemConsumerMetrics.java b/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaSystemConsumerMetrics.java
index 03b0564..5cc6f84 100644
--- a/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaSystemConsumerMetrics.java
+++ b/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaSystemConsumerMetrics.java
@@ -1,5 +1,4 @@
 /*
- *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -16,9 +15,7 @@
  * KIND, either express or implied.  See the License for the
  * specific language governing permissions and limitations
  * under the License.
- *
  */
-
 package org.apache.samza.system.kafka;
 
 import java.util.HashMap;
@@ -96,7 +93,7 @@
 
   protected static void validate(Map<String, Metric> metricMap, Map<String, String> expectedValues) {
     // match the expected value, set in the test above, and the value in the metrics
-    for(Map.Entry<String, String> e: expectedValues.entrySet()) {
+    for (Map.Entry<String, String> e : expectedValues.entrySet()) {
       String metricName = e.getKey();
       String expectedValue = e.getValue();
       // get the metric from the registry
diff --git a/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaSystemProducerJava.java b/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaSystemProducerJava.java
index 7fc450d..73673d3 100644
--- a/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaSystemProducerJava.java
+++ b/samza-kafka/src/test/java/org/apache/samza/system/kafka/TestKafkaSystemProducerJava.java
@@ -27,7 +27,6 @@
 import org.junit.Test;
 import scala.runtime.AbstractFunction0;
 
-import static junit.framework.Assert.assertEquals;
 import static junit.framework.Assert.assertTrue;
 
 
@@ -38,19 +37,19 @@
   @Test
   public void testInstantiateProducer() {
     KafkaSystemProducer ksp = new KafkaSystemProducer("SysName", new ExponentialSleepStrategy(2.0, 200, 10000),
-        new AbstractFunction0<Producer<byte[], byte[]>>() {
-          @Override
-          public Producer<byte[], byte[]> apply() {
-            return new KafkaProducer<>(new HashMap<String, Object>());
-          }
-        }, new KafkaSystemProducerMetrics("SysName", new MetricsRegistryMap()), new AbstractFunction0<Object>() {
-      @Override
-      public Object apply() {
-        return System.currentTimeMillis();
-      }
-    }, false);
+      new AbstractFunction0<Producer<byte[], byte[]>>() {
+        @Override
+        public Producer<byte[], byte[]> apply() {
+          return new KafkaProducer<>(new HashMap<String, Object>());
+        }
+      }, new KafkaSystemProducerMetrics("SysName", new MetricsRegistryMap()), new AbstractFunction0<Object>() {
+        @Override
+        public Object apply() {
+          return System.currentTimeMillis();
+        }
+      }, false);
 
     long now = System.currentTimeMillis();
-    assertTrue((Long)ksp.clock().apply() >= now);
+    assertTrue((Long) ksp.clock().apply() >= now);
   }
 }
diff --git a/samza-kafka/src/test/java/org/apache/samza/system/kafka/descriptors/TestKafkaInputDescriptor.java b/samza-kafka/src/test/java/org/apache/samza/system/kafka/descriptors/TestKafkaInputDescriptor.java
index 5bce72d..3992e90 100644
--- a/samza-kafka/src/test/java/org/apache/samza/system/kafka/descriptors/TestKafkaInputDescriptor.java
+++ b/samza-kafka/src/test/java/org/apache/samza/system/kafka/descriptors/TestKafkaInputDescriptor.java
@@ -25,12 +25,9 @@
 import org.apache.samza.serializers.IntegerSerde;
 import org.apache.samza.serializers.KVSerde;
 import org.apache.samza.serializers.StringSerde;
-import org.apache.samza.system.kafka.descriptors.KafkaInputDescriptor;
-import org.apache.samza.system.kafka.descriptors.KafkaSystemDescriptor;
 import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 
 public class TestKafkaInputDescriptor {
   @Test
@@ -42,7 +39,7 @@
             .withConsumerAutoOffsetReset("largest")
             .withConsumerFetchMessageMaxBytes(1024 * 1024);
 
-    Map<String, String> generatedConfigs = isd.toConfig();;
+    Map<String, String> generatedConfigs = isd.toConfig();
     assertEquals("kafka", generatedConfigs.get("streams.input-stream.samza.system"));
     assertEquals("largest", generatedConfigs.get("systems.kafka.streams.input-stream.consumer.auto.offset.reset"));
     assertEquals("1048576", generatedConfigs.get("systems.kafka.streams.input-stream.consumer.fetch.message.max.bytes"));
diff --git a/samza-kafka/src/test/java/org/apache/samza/system/kafka/descriptors/TestKafkaSystemDescriptor.java b/samza-kafka/src/test/java/org/apache/samza/system/kafka/descriptors/TestKafkaSystemDescriptor.java
index 31469f8..2a31198 100644
--- a/samza-kafka/src/test/java/org/apache/samza/system/kafka/descriptors/TestKafkaSystemDescriptor.java
+++ b/samza-kafka/src/test/java/org/apache/samza/system/kafka/descriptors/TestKafkaSystemDescriptor.java
@@ -23,7 +23,6 @@
 
 import java.util.Map;
 import org.apache.samza.system.SystemStreamMetadata;
-import org.apache.samza.system.kafka.descriptors.KafkaSystemDescriptor;
 import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
@@ -37,7 +36,7 @@
             .withProducerBootstrapServers(ImmutableList.of("localhost:567", "localhost:890"))
             .withDefaultStreamOffsetDefault(SystemStreamMetadata.OffsetType.OLDEST)
             .withConsumerAutoOffsetReset("smallest")
-            .withConsumerFetchMessageMaxBytes(1024*1024)
+            .withConsumerFetchMessageMaxBytes(1024 * 1024)
             .withSamzaFetchThreshold(10000)
             .withSamzaFetchThresholdBytes(1024 * 1024)
             .withConsumerConfigs(ImmutableMap.of("custom-consumer-config-key", "custom-consumer-config-value"))
diff --git a/samza-kafka/src/test/scala/org/apache/samza/checkpoint/kafka/TestKafkaCheckpointManager.scala b/samza-kafka/src/test/scala/org/apache/samza/checkpoint/kafka/TestKafkaCheckpointManager.scala
index 2e7a7e4..7d6db64 100644
--- a/samza-kafka/src/test/scala/org/apache/samza/checkpoint/kafka/TestKafkaCheckpointManager.scala
+++ b/samza-kafka/src/test/scala/org/apache/samza/checkpoint/kafka/TestKafkaCheckpointManager.scala
@@ -66,6 +66,7 @@
     props.map(_root_.kafka.server.KafkaConfig.fromProps)
   }
 
+  @Test
   def testWriteCheckpointShouldRecreateSystemProducerOnFailure(): Unit = {
     val checkpointTopic = "checkpoint-topic-2"
     val mockKafkaProducer: SystemProducer = Mockito.mock(classOf[SystemProducer])
@@ -82,7 +83,6 @@
     val spec = new KafkaStreamSpec("id", checkpointTopic, checkpointSystemName, 1, 1, props)
     val checkPointManager = Mockito.spy(new KafkaCheckpointManager(spec, new MockSystemFactory, false, config, new NoOpMetricsRegistry))
     val newKafkaProducer: SystemProducer = Mockito.mock(classOf[SystemProducer])
-    checkPointManager.MaxRetryDurationInMillis = 1
 
     Mockito.doReturn(newKafkaProducer).when(checkPointManager).getSystemProducer()
 
diff --git a/samza-kafka/src/test/scala/org/apache/samza/config/TestKafkaConfig.scala b/samza-kafka/src/test/scala/org/apache/samza/config/TestKafkaConfig.scala
index 64b476b..f62e8a3 100644
--- a/samza-kafka/src/test/scala/org/apache/samza/config/TestKafkaConfig.scala
+++ b/samza-kafka/src/test/scala/org/apache/samza/config/TestKafkaConfig.scala
@@ -169,7 +169,7 @@
     assertEquals("otherstream", storeToChangelog.getOrDefault("test3", ""))
     assertNull(kafkaConfig.getChangelogKafkaProperties("test1").getProperty("retention.ms"))
     assertNull(kafkaConfig.getChangelogKafkaProperties("test2").getProperty("retention.ms"))
-    assertNull(kafkaConfig.getChangelogKafkaProperties("test1").getProperty("min.compaction.lag.ms"))
+    assertNotNull(kafkaConfig.getChangelogKafkaProperties("test1").getProperty("min.compaction.lag.ms"))
 
     props.setProperty("systems." + SYSTEM_NAME + ".samza.factory", "org.apache.samza.system.kafka.SomeOtherFactory")
     val storeToChangelog1 = kafkaConfig.getKafkaChangelogEnabledStores()
diff --git a/samza-kv-couchbase/src/test/java/org/apache/samza/table/remote/couchbase/TestCouchbaseBucketRegistry.java b/samza-kv-couchbase/src/test/java/org/apache/samza/table/remote/couchbase/TestCouchbaseBucketRegistry.java
index 960712d..e2c5ab1 100644
--- a/samza-kv-couchbase/src/test/java/org/apache/samza/table/remote/couchbase/TestCouchbaseBucketRegistry.java
+++ b/samza-kv-couchbase/src/test/java/org/apache/samza/table/remote/couchbase/TestCouchbaseBucketRegistry.java
@@ -22,7 +22,6 @@
 import com.couchbase.client.java.Bucket;
 import com.couchbase.client.java.CouchbaseCluster;
 import com.couchbase.client.java.env.CouchbaseEnvironment;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import org.junit.Test;
@@ -60,9 +59,9 @@
     when(CouchbaseCluster.create(any(CouchbaseEnvironment.class), anyListOf(String.class))).thenReturn(cluster);
     CouchbaseBucketRegistry registry = new CouchbaseBucketRegistry();
     Bucket bucket1 = registry.getBucket(bucketName1, clusterNodes, configs);
-    Bucket bucket1_copy = registry.getBucket(bucketName1, clusterNodes, configs);
+    Bucket bucket1Copy = registry.getBucket(bucketName1, clusterNodes, configs);
     Bucket bucket2 = registry.getBucket(bucketName2, clusterNodes, configs);
-    assertEquals(bucket1, bucket1_copy);
+    assertEquals(bucket1, bucket1Copy);
     assertNotEquals(bucket1, bucket2);
   }
 
diff --git a/samza-kv-inmemory/src/main/java/org/apache/samza/storage/kv/inmemory/InMemoryKeyValueStorageEngineFactory.java b/samza-kv-inmemory/src/main/java/org/apache/samza/storage/kv/inmemory/InMemoryKeyValueStorageEngineFactory.java
new file mode 100644
index 0000000..8cd4e36
--- /dev/null
+++ b/samza-kv-inmemory/src/main/java/org/apache/samza/storage/kv/inmemory/InMemoryKeyValueStorageEngineFactory.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.samza.storage.kv.inmemory;
+
+import java.io.File;
+import org.apache.samza.context.ContainerContext;
+import org.apache.samza.context.JobContext;
+import org.apache.samza.metrics.MetricsRegistry;
+import org.apache.samza.storage.StorageEngineFactory.StoreMode;
+import org.apache.samza.storage.kv.BaseKeyValueStorageEngineFactory;
+import org.apache.samza.storage.kv.KeyValueStore;
+import org.apache.samza.storage.kv.KeyValueStoreMetrics;
+import org.apache.samza.system.SystemStreamPartition;
+
+
+public class InMemoryKeyValueStorageEngineFactory<K, V> extends BaseKeyValueStorageEngineFactory<K, V> {
+  @Override
+  protected KeyValueStore<byte[], byte[]> getKVStore(String storeName,
+      File storeDir,
+      MetricsRegistry registry,
+      SystemStreamPartition changeLogSystemStreamPartition,
+      JobContext jobContext,
+      ContainerContext containerContext,
+      StoreMode storeMode) {
+    KeyValueStoreMetrics metrics = new KeyValueStoreMetrics(storeName, registry);
+    return new InMemoryKeyValueStore(metrics);
+  }
+}
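
The Java port keeps the same package and class name as the Scala factory it replaces (deleted further below), so job configurations that reference the factory by its fully qualified name keep working. A hedged sketch of that wiring follows; the store name and serde names are placeholders and use Samza's usual stores.<store-name>.* config convention.

    Map<String, String> config = new HashMap<>();
    config.put("stores.example-store.factory",
        "org.apache.samza.storage.kv.inmemory.InMemoryKeyValueStorageEngineFactory");
    config.put("stores.example-store.key.serde", "string"); // assumes a serde registered under this name
    config.put("stores.example-store.msg.serde", "string");
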
diff --git a/samza-kv-inmemory/src/main/java/org/apache/samza/storage/kv/inmemory/InMemoryKeyValueStore.java b/samza-kv-inmemory/src/main/java/org/apache/samza/storage/kv/inmemory/InMemoryKeyValueStore.java
new file mode 100644
index 0000000..1d5a36c
--- /dev/null
+++ b/samza-kv-inmemory/src/main/java/org/apache/samza/storage/kv/inmemory/InMemoryKeyValueStore.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.samza.storage.kv.inmemory;
+
+import java.nio.file.Path;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.ConcurrentSkipListMap;
+import com.google.common.base.Preconditions;
+import com.google.common.primitives.UnsignedBytes;
+import org.apache.samza.checkpoint.CheckpointId;
+import org.apache.samza.storage.kv.Entry;
+import org.apache.samza.storage.kv.KeyValueIterator;
+import org.apache.samza.storage.kv.KeyValueSnapshot;
+import org.apache.samza.storage.kv.KeyValueStore;
+import org.apache.samza.storage.kv.KeyValueStoreMetrics;
+
+
+/**
+ * In-memory implementation of a {@link KeyValueStore}.
+ *
+ * This uses a {@link ConcurrentSkipListMap} to store the keys in order.
+ */
+public class InMemoryKeyValueStore implements KeyValueStore<byte[], byte[]> {
+  private final KeyValueStoreMetrics metrics;
+  private final ConcurrentSkipListMap<byte[], byte[]> underlying;
+
+  /**
+   * @param metrics A metrics instance to publish key-value store related statistics
+   */
+  public InMemoryKeyValueStore(KeyValueStoreMetrics metrics) {
+    this.metrics = metrics;
+    this.underlying = new ConcurrentSkipListMap<>(UnsignedBytes.lexicographicalComparator());
+  }
+
+  @Override
+  public byte[] get(byte[] key) {
+    this.metrics.gets().inc();
+    Preconditions.checkArgument(key != null, "Null argument 'key' not allowed");
+    byte[] found = this.underlying.get(key);
+    if (found != null) {
+      metrics.bytesRead().inc(found.length);
+    }
+    return found;
+  }
+
+  @Override
+  public void put(byte[] key, byte[] value) {
+    this.metrics.puts().inc();
+    Preconditions.checkArgument(key != null, "Null argument 'key' not allowed");
+    if (value == null) {
+      this.metrics.deletes().inc();
+      this.underlying.remove(key);
+    } else {
+      this.metrics.bytesWritten().inc(key.length + value.length);
+      this.underlying.put(key, value);
+    }
+  }
+
+  @Override
+  public void putAll(List<Entry<byte[], byte[]>> entries) {
+    // The backing map's putAll requires a Map argument, so building one would mean iterating over
+    // all the entries anyway. Just put each entry directly instead.
+    for (Entry<byte[], byte[]> next : entries) {
+      put(next.getKey(), next.getValue());
+    }
+  }
+
+  @Override
+  public void delete(byte[] key) {
+    // TODO Bug: SAMZA-2563: This double counts deletes for metrics, because put also counts a delete
+    metrics.deletes().inc();
+    put(key, null);
+  }
+
+  @Override
+  public KeyValueIterator<byte[], byte[]> range(byte[] from, byte[] to) {
+    this.metrics.ranges().inc();
+    Preconditions.checkArgument(from != null, "Null argument 'from' not allowed");
+    Preconditions.checkArgument(to != null, "Null argument 'to' not allowed");
+    return new InMemoryIterator(this.underlying.subMap(from, to).entrySet().iterator(), this.metrics);
+  }
+
+  @Override
+  public KeyValueSnapshot<byte[], byte[]> snapshot(byte[] from, byte[] to) {
+    // TODO: Bug: SAMZA-2564: does not satisfy immutability constraint, since entrySet is backed by the underlying map.
+    // snapshot the underlying map
+    Set<Map.Entry<byte[], byte[]>> entries = this.underlying.subMap(from, to).entrySet();
+    return new KeyValueSnapshot<byte[], byte[]>() {
+      @Override
+      public KeyValueIterator<byte[], byte[]> iterator() {
+        return new InMemoryIterator(entries.iterator(), metrics);
+      }
+
+      @Override
+      public void close() {
+      }
+    };
+  }
+
+  @Override
+  public KeyValueIterator<byte[], byte[]> all() {
+    this.metrics.alls().inc();
+    return new InMemoryIterator(this.underlying.entrySet().iterator(), this.metrics);
+  }
+
+  @Override
+  public Optional<Path> checkpoint(CheckpointId id) {
+    // No checkpoint being persisted. State restores from Changelog.
+    return Optional.empty();
+  }
+
+  @Override
+  public void flush() {
+    // No-op for In memory store.
+    metrics.flushes().inc();
+  }
+
+  @Override
+  public void close() {
+  }
+
+  private static class InMemoryIterator implements KeyValueIterator<byte[], byte[]> {
+    private final Iterator<Map.Entry<byte[], byte[]>> iter;
+    private final KeyValueStoreMetrics metrics;
+
+    private InMemoryIterator(Iterator<Map.Entry<byte[], byte[]>> iter, KeyValueStoreMetrics metrics) {
+      this.iter = iter;
+      this.metrics = metrics;
+    }
+
+    @Override
+    public boolean hasNext() {
+      return this.iter.hasNext();
+    }
+
+    @Override
+    public Entry<byte[], byte[]> next() {
+      Map.Entry<byte[], byte[]> n = this.iter.next();
+      if (n != null && n.getKey() != null) {
+        this.metrics.bytesRead().inc(n.getKey().length);
+      }
+      if (n != null && n.getValue() != null) {
+        this.metrics.bytesRead().inc(n.getValue().length);
+      }
+      return new Entry<>(n.getKey(), n.getValue());
+    }
+
+    @Override
+    public void remove() {
+      throw new UnsupportedOperationException("InMemoryKeyValueStore iterator doesn't support remove");
+    }
+
+    @Override
+    public void close() {
+    }
+  }
+}
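
Because the store orders keys with UnsignedBytes.lexicographicalComparator(), range(from, to) walks the half-open interval [from, to) in unsigned byte order. Below is a minimal hedged usage sketch, not part of the patch; the store name and data are placeholders, and the imports follow the classes referenced in this file plus MetricsRegistryMap and StandardCharsets.

    KeyValueStoreMetrics metrics = new KeyValueStoreMetrics("example-store", new MetricsRegistryMap());
    InMemoryKeyValueStore store = new InMemoryKeyValueStore(metrics);
    store.put(new byte[] {0x01}, "a".getBytes(StandardCharsets.UTF_8));
    store.put(new byte[] {0x02}, "b".getBytes(StandardCharsets.UTF_8));
    store.put(new byte[] {0x03}, "c".getBytes(StandardCharsets.UTF_8));

    KeyValueIterator<byte[], byte[]> iterator = store.range(new byte[] {0x01}, new byte[] {0x03});
    while (iterator.hasNext()) {
      Entry<byte[], byte[]> entry = iterator.next();
      // visits keys 0x01 and 0x02 in order; the upper bound 0x03 is excluded
    }
    iterator.close();
    store.close();
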
diff --git a/samza-kv-inmemory/src/main/scala/org/apache/samza/storage/kv/inmemory/InMemoryKeyValueStorageEngineFactory.scala b/samza-kv-inmemory/src/main/scala/org/apache/samza/storage/kv/inmemory/InMemoryKeyValueStorageEngineFactory.scala
deleted file mode 100644
index 9a58760..0000000
--- a/samza-kv-inmemory/src/main/scala/org/apache/samza/storage/kv/inmemory/InMemoryKeyValueStorageEngineFactory.scala
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.samza.storage.kv.inmemory
-
-import java.io.File
-
-import org.apache.samza.context.{ContainerContext, JobContext}
-import org.apache.samza.metrics.MetricsRegistry
-import org.apache.samza.storage.StorageEngineFactory.StoreMode
-import org.apache.samza.storage.kv.{BaseKeyValueStorageEngineFactory, KeyValueStore, KeyValueStoreMetrics}
-import org.apache.samza.system.SystemStreamPartition
-
-class InMemoryKeyValueStorageEngineFactory[K, V] extends BaseKeyValueStorageEngineFactory[K, V] {
-
-  override def getKVStore(storeName: String,
-    storeDir: File,
-    registry: MetricsRegistry,
-    changeLogSystemStreamPartition: SystemStreamPartition,
-    jobContext: JobContext,
-    containerContext: ContainerContext, storeMode: StoreMode): KeyValueStore[Array[Byte], Array[Byte]] = {
-    val metrics = new KeyValueStoreMetrics(storeName, registry)
-    val inMemoryDb = new InMemoryKeyValueStore (metrics)
-    inMemoryDb
-  }
-
-}
diff --git a/samza-kv-inmemory/src/main/scala/org/apache/samza/storage/kv/inmemory/InMemoryKeyValueStore.scala b/samza-kv-inmemory/src/main/scala/org/apache/samza/storage/kv/inmemory/InMemoryKeyValueStore.scala
deleted file mode 100644
index 2d26d29..0000000
--- a/samza-kv-inmemory/src/main/scala/org/apache/samza/storage/kv/inmemory/InMemoryKeyValueStore.scala
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.samza.storage.kv.inmemory
-
-import com.google.common.primitives.UnsignedBytes
-import org.apache.samza.util.Logging
-import org.apache.samza.storage.kv._
-import java.nio.file.Path
-import java.util
-import java.util.Optional
-
-import org.apache.samza.checkpoint.CheckpointId
-
-/**
- * In memory implementation of a key value store.
- *
- * This uses a ConcurrentSkipListMap to store the keys in order
- *
- * @param metrics A metrics instance to publish key-value store related statistics
- */
-class InMemoryKeyValueStore(val metrics: KeyValueStoreMetrics = new KeyValueStoreMetrics)
-  extends KeyValueStore[Array[Byte], Array[Byte]] with Logging {
-
-  val underlying = new util.concurrent.ConcurrentSkipListMap[Array[Byte], Array[Byte]] (UnsignedBytes.lexicographicalComparator())
-
-  override def flush(): Unit = {
-    // No-op for In memory store.
-    metrics.flushes.inc
-  }
-
-  override def close(): Unit = Unit
-
-  private class InMemoryIterator (val iter: util.Iterator[util.Map.Entry[Array[Byte], Array[Byte]]])
-    extends KeyValueIterator[Array[Byte], Array[Byte]] {
-
-    override def close(): Unit = Unit
-
-    override def remove(): Unit = throw new UnsupportedOperationException("InMemoryKeyValueStore iterator doesn't support remove")
-
-    override def next(): Entry[Array[Byte], Array[Byte]] = {
-      val n = iter.next()
-      if (n != null && n.getKey != null) {
-        metrics.bytesRead.inc(n.getKey.size)
-      }
-      if (n != null && n.getValue != null) {
-        metrics.bytesRead.inc(n.getValue.size)
-      }
-      new Entry(n.getKey, n.getValue)
-    }
-
-    override def hasNext: Boolean = iter.hasNext
-  }
-
-  override def all(): KeyValueIterator[Array[Byte], Array[Byte]] = {
-    metrics.alls.inc
-
-    new InMemoryIterator(underlying.entrySet().iterator())
-  }
-
-  override def range(from: Array[Byte], to: Array[Byte]): KeyValueIterator[Array[Byte], Array[Byte]] = {
-    metrics.ranges.inc
-    require(from != null && to != null, "Null bound not allowed.")
-
-    new InMemoryIterator(underlying.subMap(from, to).entrySet().iterator())
-  }
-
-  override def delete(key: Array[Byte]): Unit = {
-    metrics.deletes.inc
-    put(key, null)
-  }
-
-  override def putAll(entries: util.List[Entry[Array[Byte], Array[Byte]]]): Unit = {
-    // TreeMap's putAll requires a map, so we'd need to iterate over all the entries anyway
-    // to use it, in order to putAll here.  Therefore, just iterate here.
-    val iter = entries.iterator()
-    while(iter.hasNext) {
-      val next = iter.next()
-      put(next.getKey, next.getValue)
-    }
-  }
-
-  override def put(key: Array[Byte], value: Array[Byte]): Unit = {
-    metrics.puts.inc
-    require(key != null, "Null key not allowed.")
-    if (value == null) {
-      metrics.deletes.inc
-      underlying.remove(key)
-    } else {
-      metrics.bytesWritten.inc(key.size + value.size)
-      underlying.put(key, value)
-    }
-  }
-
-  override def get(key: Array[Byte]): Array[Byte] = {
-    metrics.gets.inc
-    require(key != null, "Null key not allowed.")
-    val found = underlying.get(key)
-    if (found != null) {
-      metrics.bytesRead.inc(found.size)
-    }
-    found
-  }
-
-  override def snapshot(from: Array[Byte], to: Array[Byte]): KeyValueSnapshot[Array[Byte], Array[Byte]] = {
-    // snapshot the underlying map
-    val entries = underlying.subMap(from, to).entrySet()
-    new KeyValueSnapshot[Array[Byte], Array[Byte]] {
-      override def iterator(): KeyValueIterator[Array[Byte], Array[Byte]] = {
-        new InMemoryIterator(entries.iterator())
-      }
-
-      override def close() { }
-    }
-  }
-
-  override def checkpoint(id: CheckpointId): Optional[Path] = {
-    // No checkpoint being persisted. State restores from Changelog.
-    Optional.empty()
-  }
-}
diff --git a/samza-kv-inmemory/src/test/java/org/apache/samza/storage/kv/inmemory/TestInMemoryKeyValueStore.java b/samza-kv-inmemory/src/test/java/org/apache/samza/storage/kv/inmemory/TestInMemoryKeyValueStore.java
index 5919690..48dbde6 100644
--- a/samza-kv-inmemory/src/test/java/org/apache/samza/storage/kv/inmemory/TestInMemoryKeyValueStore.java
+++ b/samza-kv-inmemory/src/test/java/org/apache/samza/storage/kv/inmemory/TestInMemoryKeyValueStore.java
@@ -19,66 +19,546 @@
 
 package org.apache.samza.storage.kv.inmemory;
 
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 import com.google.common.collect.Iterators;
 import com.google.common.primitives.Ints;
-import org.apache.samza.metrics.MetricsRegistryMap;
+import org.apache.samza.SamzaException;
+import org.apache.samza.metrics.Counter;
 import org.apache.samza.storage.kv.Entry;
 import org.apache.samza.storage.kv.KeyValueIterator;
 import org.apache.samza.storage.kv.KeyValueSnapshot;
 import org.apache.samza.storage.kv.KeyValueStoreMetrics;
+import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
 
-import java.io.ByteArrayOutputStream;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.ThreadLocalRandom;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
+import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.when;
+
 
 public class TestInMemoryKeyValueStore {
+  private static final String DEFAULT_KEY_PREFIX = "key_prefix";
+  private static final String OTHER_KEY_PREFIX = "other_key_prefix";
+  /**
+   * Keep the values longer than the keys so that metrics validations for key and value sizes don't collide.
+   */
+  private static final String DEFAULT_VALUE_PREFIX = "value_prefix_value_prefix";
+  private static final String OTHER_VALUE_PREFIX = "other_value_prefix_value_prefix";
+
+  /**
+   * The length of the result of {@link #key(int)} will always be the same, so this can be used as the length for any
+   * key produced by {@link #key(int)}.
+   */
+  private static final int DEFAULT_KEY_LENGTH = key(0).length;
+  /**
+   * The length of the result of {@link #value(int)} will always be the same, so this can be used as the length for any
+   * value produced by {@link #value(int)}.
+   */
+  private static final int DEFAULT_VALUE_LENGTH = value(0).length;
+
+  @Mock
+  private KeyValueStoreMetrics keyValueStoreMetrics;
+  @Mock
+  private Counter getsCounter;
+  @Mock
+  private Counter bytesReadCounter;
+  @Mock
+  private Counter putsCounter;
+  @Mock
+  private Counter bytesWrittenCounter;
+  @Mock
+  private Counter deletesCounter;
+
+  private InMemoryKeyValueStore inMemoryKeyValueStore;
+
+  @Before
+  public void setup() {
+    MockitoAnnotations.initMocks(this);
+    when(this.keyValueStoreMetrics.gets()).thenReturn(this.getsCounter);
+    when(this.keyValueStoreMetrics.bytesRead()).thenReturn(this.bytesReadCounter);
+    when(this.keyValueStoreMetrics.puts()).thenReturn(this.putsCounter);
+    when(this.keyValueStoreMetrics.bytesWritten()).thenReturn(this.bytesWrittenCounter);
+    when(this.keyValueStoreMetrics.deletes()).thenReturn(this.deletesCounter);
+    this.inMemoryKeyValueStore = new InMemoryKeyValueStore(this.keyValueStoreMetrics);
+  }
+
   @Test
-  public void testSnapshot() throws Exception {
-    InMemoryKeyValueStore store = new InMemoryKeyValueStore(
-        new KeyValueStoreMetrics("testInMemory", new MetricsRegistryMap()));
-    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
-    String prefix = "prefix";
-    for(int i = 0; i < 100; i++) {
-      store.put(genKey(outputStream, prefix, i), genValue());
+  public void testGet() {
+    this.inMemoryKeyValueStore.put(key(0), value(0));
+    this.inMemoryKeyValueStore.put(key(OTHER_KEY_PREFIX, 1), value(OTHER_VALUE_PREFIX, 1));
+
+    assertArrayEquals(value(0), this.inMemoryKeyValueStore.get(key(0)));
+    assertArrayEquals(value(OTHER_VALUE_PREFIX, 1), this.inMemoryKeyValueStore.get(key(OTHER_KEY_PREFIX, 1)));
+    verify(this.getsCounter, times(2)).inc();
+    verify(this.bytesReadCounter).inc(DEFAULT_VALUE_LENGTH);
+    verify(this.bytesReadCounter).inc(value(OTHER_VALUE_PREFIX, 1).length);
+  }
+
+  @Test
+  public void testGetEmpty() {
+    assertNull(this.inMemoryKeyValueStore.get(key(0)));
+    verify(this.getsCounter).inc();
+    verifyZeroInteractions(this.bytesReadCounter);
+  }
+
+  @Test
+  public void testGetAfterDelete() {
+    this.inMemoryKeyValueStore.put(key(0), value(0));
+    this.inMemoryKeyValueStore.delete(key(0));
+
+    assertNull(this.inMemoryKeyValueStore.get(key(0)));
+    verify(this.getsCounter).inc();
+    verifyZeroInteractions(this.bytesReadCounter);
+  }
+
+  @Test
+  public void testPut() {
+    this.inMemoryKeyValueStore.put(key(0), value(0));
+    this.inMemoryKeyValueStore.put(key(OTHER_KEY_PREFIX, 1), value(OTHER_VALUE_PREFIX, 1));
+
+    assertArrayEquals(value(0), this.inMemoryKeyValueStore.get(key(0)));
+    assertArrayEquals(value(OTHER_VALUE_PREFIX, 1), this.inMemoryKeyValueStore.get(key(OTHER_KEY_PREFIX, 1)));
+    verify(this.putsCounter, times(2)).inc();
+    verify(this.bytesWrittenCounter).inc(DEFAULT_KEY_LENGTH + DEFAULT_VALUE_LENGTH);
+    verify(this.bytesWrittenCounter).inc(key(OTHER_KEY_PREFIX, 1).length + value(OTHER_VALUE_PREFIX, 1).length);
+  }
+
+  @Test
+  public void testPutExistingEntry() {
+    this.inMemoryKeyValueStore.put(key(0), value(0));
+    this.inMemoryKeyValueStore.put(key(0), value(OTHER_VALUE_PREFIX, 1));
+
+    assertArrayEquals(value(OTHER_VALUE_PREFIX, 1), this.inMemoryKeyValueStore.get(key(0)));
+    verify(this.putsCounter, times(2)).inc();
+    verify(this.bytesWrittenCounter).inc(DEFAULT_KEY_LENGTH + DEFAULT_VALUE_LENGTH);
+    verify(this.bytesWrittenCounter).inc(DEFAULT_KEY_LENGTH + value(OTHER_VALUE_PREFIX, 1).length);
+  }
+
+  @Test
+  public void testPutEmptyValue() {
+    byte[] emptyValue = new byte[0];
+    this.inMemoryKeyValueStore.put(key(0), emptyValue);
+
+    assertEquals(0, this.inMemoryKeyValueStore.get(key(0)).length);
+    verify(this.putsCounter).inc();
+    verify(this.bytesWrittenCounter).inc(DEFAULT_KEY_LENGTH);
+  }
+
+  @Test
+  public void testPutNull() {
+    this.inMemoryKeyValueStore.put(key(0), value(0));
+    this.inMemoryKeyValueStore.put(key(0), null);
+
+    assertNull(this.inMemoryKeyValueStore.get(key(0)));
+    verify(this.putsCounter, times(2)).inc();
+    verify(this.deletesCounter).inc();
+    verify(this.bytesWrittenCounter).inc(DEFAULT_KEY_LENGTH + DEFAULT_VALUE_LENGTH);
+  }
+
+  @Test
+  public void testPutAll() {
+    List<Entry<byte[], byte[]>> entries = new ArrayList<>();
+    for (int i = 0; i < 10; i++) {
+      entries.add(new Entry<>(key(i), value(i)));
     }
+    this.inMemoryKeyValueStore.putAll(entries);
 
-    byte[] firstKey = genKey(outputStream, prefix, 0);
-    byte[] lastKey = genKey(outputStream, prefix, 100);
-    KeyValueSnapshot<byte[], byte[]> snapshot = store.snapshot(firstKey, lastKey);
-    // Make sure the cached Iterable won't change when new elements are added
-    store.put(genKey(outputStream, prefix, 200), genValue());
-    assertTrue(Iterators.size(snapshot.iterator()) == 100);
-
-    List<Integer> keys = new ArrayList<>();
-    KeyValueIterator<byte[], byte[]> iter = snapshot.iterator();
-    while (iter.hasNext()) {
-      Entry<byte[], byte[]> entry = iter.next();
-      int key = Ints.fromByteArray(Arrays.copyOfRange(entry.getKey(), prefix.getBytes().length, entry.getKey().length));
-      keys.add(key);
+    for (int i = 0; i < 10; i++) {
+      assertArrayEquals(value(i), this.inMemoryKeyValueStore.get(key(i)));
     }
-    assertEquals(keys, IntStream.rangeClosed(0, 99).boxed().collect(Collectors.toList()));
-
-    outputStream.close();
-    store.close();
+    verify(this.putsCounter, times(10)).inc();
+    // when using key(i) and value(i), the byte[] lengths will be the same
+    verify(this.bytesWrittenCounter, times(10)).inc(DEFAULT_KEY_LENGTH + DEFAULT_VALUE_LENGTH);
   }
 
-  private byte[] genKey(ByteArrayOutputStream outputStream, String prefix, int i) throws Exception {
-    outputStream.reset();
-    outputStream.write(prefix.getBytes());
-    outputStream.write(Ints.toByteArray(i));
-    return outputStream.toByteArray();
+  @Test
+  public void testPutAllUpdate() {
+    // check that an existing value is overridden
+    this.inMemoryKeyValueStore.put(key(0), value(OTHER_VALUE_PREFIX, 0));
+    List<Entry<byte[], byte[]>> entries = new ArrayList<>();
+    for (int i = 0; i < 10; i++) {
+      entries.add(new Entry<>(key(i), value(i)));
+    }
+    this.inMemoryKeyValueStore.putAll(entries);
+
+    for (int i = 0; i < 10; i++) {
+      assertArrayEquals(value(i), this.inMemoryKeyValueStore.get(key(i)));
+    }
+    // 1 time for initial value to be overridden, 10 times for "regular" puts
+    verify(this.putsCounter, times(11)).inc();
+    // for initial value which is overridden
+    verify(this.bytesWrittenCounter).inc(DEFAULT_KEY_LENGTH + value(OTHER_VALUE_PREFIX, 0).length);
+    // when using key(i) and value(i), the byte[] lengths will be the same
+    verify(this.bytesWrittenCounter, times(10)).inc(DEFAULT_KEY_LENGTH + DEFAULT_VALUE_LENGTH);
   }
 
-  private byte[] genValue() {
-    int randomVal = ThreadLocalRandom.current().nextInt(0, 100000);
-    return Ints.toByteArray(randomVal);
+  @Test
+  public void testPutAllWithNull() {
+    List<Entry<byte[], byte[]>> entries = new ArrayList<>();
+    for (int i = 0; i < 10; i++) {
+      entries.add(new Entry<>(key(i), value(i)));
+    }
+    this.inMemoryKeyValueStore.putAll(entries);
+
+    List<Entry<byte[], byte[]>> deleteEntries = new ArrayList<>();
+    for (int i = 0; i < 3; i++) {
+      deleteEntries.add(new Entry<>(key(i), null));
+    }
+    this.inMemoryKeyValueStore.putAll(deleteEntries);
+
+    for (int i = 0; i < 10; i++) {
+      if (i < 3) {
+        assertNull(this.inMemoryKeyValueStore.get(key(i)));
+      } else {
+        assertArrayEquals(value(i), this.inMemoryKeyValueStore.get(key(i)));
+      }
+    }
+    // 10 times for "regular" puts, 3 times for deletion puts
+    verify(this.putsCounter, times(13)).inc();
+    // 10 "regular" puts all have same size for key/value
+    verify(this.bytesWrittenCounter, times(10)).inc(DEFAULT_KEY_LENGTH + DEFAULT_VALUE_LENGTH);
+    verifyNoMoreInteractions(this.bytesWrittenCounter);
+    verify(this.deletesCounter, times(3)).inc();
   }
-}
+
+  @Test
+  public void testDelete() {
+    this.inMemoryKeyValueStore.put(key(0), value(0));
+    this.inMemoryKeyValueStore.delete(key(0));
+    assertNull(this.inMemoryKeyValueStore.get(key(0)));
+
+    /*
+     * There is a bug in which deletes are double counted in metrics. This deletesCounter should only be invoked once
+     * when the bug is fixed.
+     */
+    verify(this.deletesCounter, times(2)).inc();
+  }
+
+  @Test
+  public void testDeleteNonExistentEntry() {
+    this.inMemoryKeyValueStore.delete(key(0));
+
+    assertNull(this.inMemoryKeyValueStore.get(key(0)));
+    /*
+     * There is a bug in which deletes are double counted in metrics. This deletesCounter should only be invoked once
+     * when the bug is fixed.
+     */
+    verify(this.deletesCounter, times(2)).inc();
+  }
+
+  @Test
+  public void testRange() {
+    Counter rangesCounter = mock(Counter.class);
+    when(this.keyValueStoreMetrics.ranges()).thenReturn(rangesCounter);
+
+    for (int i = 0; i < 10; i++) {
+      this.inMemoryKeyValueStore.put(key(OTHER_KEY_PREFIX, i), value(OTHER_VALUE_PREFIX, i));
+      this.inMemoryKeyValueStore.put(key(i), value(i));
+    }
+    KeyValueIterator<byte[], byte[]> range = this.inMemoryKeyValueStore.range(key(0), key(5));
+
+    for (int i = 0; i < 5; i++) {
+      assertEntryEquals(key(i), value(i), range.next());
+    }
+    assertFalse(range.hasNext());
+    verify(rangesCounter).inc();
+    // key size increments: key(i) produces byte[] of same length
+    verify(this.bytesReadCounter, times(5)).inc(DEFAULT_KEY_LENGTH);
+    // value size increments: value(i) produces byte[] of same length
+    verify(this.bytesReadCounter, times(5)).inc(DEFAULT_VALUE_LENGTH);
+  }
+
+  @Test
+  public void testRangeWithUpdate() {
+    Counter rangesCounter = mock(Counter.class);
+    when(this.keyValueStoreMetrics.ranges()).thenReturn(rangesCounter);
+
+    // exclude the index 1 entry so it can be added later
+    for (int i = 0; i < 10; i++) {
+      if (i != 1) {
+        this.inMemoryKeyValueStore.put(key(i), value(i));
+      }
+    }
+    KeyValueIterator<byte[], byte[]> range = this.inMemoryKeyValueStore.range(key(0), key(5));
+    assertEquals(4, Iterators.size(range));
+
+    this.inMemoryKeyValueStore.delete(key(2)); // delete an entry from the range
+    this.inMemoryKeyValueStore.put(key(3), value(OTHER_VALUE_PREFIX, 3)); // update an entry
+    this.inMemoryKeyValueStore.put(key(1), value(DEFAULT_VALUE_PREFIX, 1)); // add a new entry
+    range = this.inMemoryKeyValueStore.range(key(0), key(5));
+    for (int i = 0; i < 5; i++) {
+      if (i != 2) { // index 2 was deleted
+        if (i == 3) { // index 3 has an updated value
+          assertEntryEquals(key(i), value(OTHER_VALUE_PREFIX, 3), range.next());
+        } else {
+          // all other entries (including index 1) have the "normal" values
+          assertEntryEquals(key(i), value(DEFAULT_VALUE_PREFIX, i), range.next());
+        }
+      }
+    }
+    assertFalse(range.hasNext());
+
+    verify(rangesCounter, times(2)).inc();
+    // key increments: 4 for iterator size for first range, 4 for second range; key(i) produces byte[] of same length
+    verify(this.bytesReadCounter, times(8)).inc(DEFAULT_KEY_LENGTH);
+    /*
+     * value increments: 4 for iterator size for first range, 3 for second range (updated entry is different); value(i)
+     * produces byte[] of same length
+     */
+    verify(this.bytesReadCounter, times(7)).inc(DEFAULT_VALUE_LENGTH);
+    // 1 call for updated entry
+    verify(this.bytesReadCounter).inc(value(OTHER_VALUE_PREFIX, 0).length);
+  }
+
+  @Test
+  public void testSnapshot() {
+    for (int i = 0; i < 10; i++) {
+      this.inMemoryKeyValueStore.put(key(OTHER_KEY_PREFIX, i), value(OTHER_VALUE_PREFIX, i));
+      this.inMemoryKeyValueStore.put(key(i), value(i));
+    }
+    KeyValueSnapshot<byte[], byte[]> snapshot = this.inMemoryKeyValueStore.snapshot(key(0), key(5));
+    KeyValueIterator<byte[], byte[]> iterator = snapshot.iterator();
+
+    for (int i = 0; i < 5; i++) {
+      assertEntryEquals(key(i), value(i), iterator.next());
+    }
+    assertFalse(iterator.hasNext());
+    // key size increments: key(i) produces byte[] of same length
+    verify(this.bytesReadCounter, times(5)).inc(DEFAULT_KEY_LENGTH);
+    // value size increments: value(i) produces byte[] of same length
+    verify(this.bytesReadCounter, times(5)).inc(DEFAULT_VALUE_LENGTH);
+  }
+
+  @Test
+  public void testSnapshotMultipleIterators() {
+    for (int i = 0; i < 10; i++) {
+      this.inMemoryKeyValueStore.put(key(OTHER_KEY_PREFIX, i), value(OTHER_VALUE_PREFIX, i));
+      this.inMemoryKeyValueStore.put(key(i), value(i));
+    }
+    KeyValueSnapshot<byte[], byte[]> snapshot = this.inMemoryKeyValueStore.snapshot(key(0), key(5));
+    assertEquals(5, Iterators.size(snapshot.iterator())); // Iterators.size exhausts the iterator
+    assertEquals(5, Iterators.size(snapshot.iterator()));
+    // key size increments: calling two separate iterators; key(i) produces byte[] of same length
+    verify(this.bytesReadCounter, times(5 * 2)).inc(DEFAULT_KEY_LENGTH);
+    // value size increments: calling two separate iterators; value(i) produces byte[] of same length
+    verify(this.bytesReadCounter, times(5 * 2)).inc(DEFAULT_VALUE_LENGTH);
+  }
+
+  @Test
+  public void testSnapshotImmutable() {
+    for (int i = 0; i < 10; i++) {
+      this.inMemoryKeyValueStore.put(key(i), value(i));
+    }
+    KeyValueSnapshot<byte[], byte[]> snapshot = this.inMemoryKeyValueStore.snapshot(key(0), key(5));
+    // make sure the entries in the snapshot don't change when something is added
+    this.inMemoryKeyValueStore.put(key(1), value(OTHER_VALUE_PREFIX, 1));
+    KeyValueIterator<byte[], byte[]> iterator = snapshot.iterator();
+
+    for (int i = 0; i < 5; i++) {
+      if (i == 1) {
+        /*
+         * There is a bug in which the snapshot is impacted by writes after calling snapshot.
+         * When the bug is fixed, the value for key(1) should be the original value from when snapshot was called.
+         */
+        assertEntryEquals(key(i), value(OTHER_VALUE_PREFIX, 1), iterator.next());
+      } else {
+        assertEntryEquals(key(i), value(i), iterator.next());
+      }
+    }
+    assertFalse(iterator.hasNext());
+    // key size increments; key(i) produces byte[] of same length
+    verify(this.bytesReadCounter, times(5)).inc(DEFAULT_KEY_LENGTH);
+    // value size increments; value(i) produces byte[] of same length
+    verify(this.bytesReadCounter, times(4)).inc(DEFAULT_VALUE_LENGTH);
+    // when snapshot immutability bug is fixed, this should be merged into the bytesRead check above
+    verify(this.bytesReadCounter).inc(value(OTHER_VALUE_PREFIX, 1).length);
+  }
+
+  @Test
+  public void testSnapshotWithUpdate() {
+    // exclude the index 1 entry so it can be added later
+    for (int i = 0; i < 10; i++) {
+      if (i != 1) {
+        this.inMemoryKeyValueStore.put(key(i), value(i));
+      }
+    }
+    KeyValueSnapshot<byte[], byte[]> snapshot = this.inMemoryKeyValueStore.snapshot(key(0), key(5));
+    assertEquals(4, Iterators.size(snapshot.iterator()));
+
+    this.inMemoryKeyValueStore.delete(key(2)); // delete an entry from the snapshot range
+    this.inMemoryKeyValueStore.put(key(3), value(OTHER_VALUE_PREFIX, 3)); // update an entry
+    this.inMemoryKeyValueStore.put(key(1), value(DEFAULT_VALUE_PREFIX, 1)); // add a new entry
+    snapshot = this.inMemoryKeyValueStore.snapshot(key(0), key(5));
+    KeyValueIterator<byte[], byte[]> iterator = snapshot.iterator();
+    for (int i = 0; i < 5; i++) {
+      if (i != 2) { // index 2 was deleted
+        if (i == 3) { // index 3 has an updated value
+          assertEntryEquals(key(i), value(OTHER_VALUE_PREFIX, 3), iterator.next());
+        } else {
+          // all other entries (including index 1) have the "normal" values
+          assertEntryEquals(key(i), value(DEFAULT_VALUE_PREFIX, i), iterator.next());
+        }
+      }
+    }
+    assertFalse(iterator.hasNext());
+
+    // key increments: 4 for iterator size for first range, 4 for second range; key(i) produces byte[] of same length
+    verify(this.bytesReadCounter, times(8)).inc(DEFAULT_KEY_LENGTH);
+    /*
+     * value increments: 4 for iterator size for first range, 3 for second range (updated entry is different); value(i)
+     * produces byte[] of same length
+     */
+    verify(this.bytesReadCounter, times(7)).inc(DEFAULT_VALUE_LENGTH);
+    // 1 call for updated entry
+    verify(this.bytesReadCounter).inc(value(OTHER_VALUE_PREFIX, 0).length);
+  }
+
+  @Test
+  public void testAll() {
+    Counter allsCounter = mock(Counter.class);
+    when(this.keyValueStoreMetrics.alls()).thenReturn(allsCounter);
+
+    for (int i = 0; i < 10; i++) {
+      this.inMemoryKeyValueStore.put(key(OTHER_KEY_PREFIX, i), value(OTHER_VALUE_PREFIX, i));
+      this.inMemoryKeyValueStore.put(key(i), value(i));
+    }
+    KeyValueIterator<byte[], byte[]> all = this.inMemoryKeyValueStore.all();
+
+    // all entries for one prefix come first due to ordering
+    for (int i = 0; i < 10; i++) {
+      assertEntryEquals(key(i), value(i), all.next());
+    }
+    for (int i = 0; i < 10; i++) {
+      assertEntryEquals(key(OTHER_KEY_PREFIX, i), value(OTHER_VALUE_PREFIX, i), all.next());
+    }
+    assertFalse(all.hasNext());
+
+    verify(allsCounter).inc();
+
+    // key size increments: 10 calls for each prefix
+    verify(this.bytesReadCounter, times(10)).inc(DEFAULT_KEY_LENGTH);
+    // all keys using OTHER_KEY_PREFIX have the same length
+    int otherKeyLength = key(OTHER_KEY_PREFIX, 0).length;
+    verify(this.bytesReadCounter, times(10)).inc(otherKeyLength);
+
+    // value size increments: 10 calls for each prefix
+    verify(this.bytesReadCounter, times(10)).inc(DEFAULT_VALUE_LENGTH);
+    // all values using OTHER_VALUE_PREFIX have the same length
+    int otherValueLength = value(OTHER_VALUE_PREFIX, 0).length;
+    verify(this.bytesReadCounter, times(10)).inc(otherValueLength);
+  }
+
+  @Test
+  public void testAllWithUpdate() {
+    Counter allsCounter = mock(Counter.class);
+    when(this.keyValueStoreMetrics.alls()).thenReturn(allsCounter);
+
+    // fill in a range of values, but leave out index 1 so it can be added later
+    for (int i = 0; i < 10; i++) {
+      if (i != 1) {
+        this.inMemoryKeyValueStore.put(key(i), value(i));
+      }
+    }
+    KeyValueIterator<byte[], byte[]> all = this.inMemoryKeyValueStore.all();
+    assertEquals(9, Iterators.size(all));
+
+    this.inMemoryKeyValueStore.delete(key(2)); // delete an entry
+    this.inMemoryKeyValueStore.put(key(3), value(OTHER_VALUE_PREFIX, 3)); // update an entry
+    this.inMemoryKeyValueStore.put(key(1), value(1)); // add a new entry
+    all = this.inMemoryKeyValueStore.all();
+    for (int i = 0; i < 10; i++) {
+      if (i != 2) { // index 2 was deleted
+        if (i == 3) { // index 3 has an updated value
+          assertEntryEquals(key(i), value(OTHER_VALUE_PREFIX, 3), all.next());
+        } else {
+          // all other entries (including index 1) have the "normal" values
+          assertEntryEquals(key(i), value(i), all.next());
+        }
+      }
+    }
+    assertFalse(all.hasNext());
+
+    verify(allsCounter, times(2)).inc();
+    // key size increments: 9 calls for iterator size check of first "all", 9 calls for second "all"
+    verify(this.bytesReadCounter, times(18)).inc(DEFAULT_KEY_LENGTH);
+    /*
+     * value size increments: 9 calls for iterator size check of first "all", 8 calls for second "all" (updated entry is
+     * different)
+     */
+    verify(this.bytesReadCounter, times(17)).inc(DEFAULT_VALUE_LENGTH);
+    // 1 call for "updatedValue"
+    verify(this.bytesReadCounter).inc(value(OTHER_VALUE_PREFIX, 3).length);
+  }
+
+  @Test
+  public void testFlush() {
+    Counter flushesCounter = mock(Counter.class);
+    when(this.keyValueStoreMetrics.flushes()).thenReturn(flushesCounter);
+    this.inMemoryKeyValueStore.flush();
+    verify(flushesCounter).inc();
+  }
+
+  /**
+   * If this is called multiple times with the same {@code prefix} and any {@code i}, then this needs to return a byte[]
+   * of the same length for each call.
+   */
+  private static byte[] key(String prefix, int i) {
+    return toBytes(prefix, i);
+  }
+
+  /**
+   * The tests depend on the fact that this returns the same length byte[] for any i (for checking metrics).
+   */
+  private static byte[] key(int i) {
+    return key(DEFAULT_KEY_PREFIX, i);
+  }
+
+  /**
+   * If this is called multiple times with the same {@code prefix} and any {@code i}, then this needs to return a byte[]
+   * of the same length for each call.
+   */
+  private static byte[] value(String prefix, int i) {
+    return toBytes(prefix, i);
+  }
+
+  /**
+   * The tests depend on the fact that this returns the same length byte[] for any i (for checking metrics).
+   */
+  private static byte[] value(int i) {
+    return value(DEFAULT_VALUE_PREFIX, i);
+  }
+
+  /**
+   * Concatenates bytes for {@code prefix} with bytes for {@code i}.
+   * If this is called multiple times with the same {@code prefix} and any {@code i}, then this needs to return a byte[]
+   * of the same length for each call.
+   */
+  private static byte[] toBytes(String prefix, int i) {
+    // wrap the checked IOException in an unchecked SamzaException so callers don't need to handle it
+    try {
+      ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
+      byteArrayOutputStream.write(prefix.getBytes());
+      byteArrayOutputStream.write(Ints.toByteArray(i));
+      return byteArrayOutputStream.toByteArray();
+    } catch (IOException e) {
+      throw new SamzaException(e);
+    }
+  }
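+
+  /*
+   * Example of the fixed-length property relied on by the metrics checks above (assuming an ASCII-compatible default
+   * charset): toBytes("key_prefix", i) always produces 10 prefix bytes + 4 bytes from Ints.toByteArray(i) = 14 bytes,
+   * for any i. The same reasoning makes DEFAULT_VALUE_LENGTH constant across all i.
+   */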
+
+  private static void assertEntryEquals(byte[] expectedKey, byte[] expectedValue, Entry<byte[], byte[]> entry) {
+    assertArrayEquals(expectedKey, entry.getKey());
+    assertArrayEquals(expectedValue, entry.getValue());
+  }
+}
\ No newline at end of file
diff --git a/samza-kv-rocksdb/src/main/java/org/apache/samza/storage/kv/RocksDbOptionsHelper.java b/samza-kv-rocksdb/src/main/java/org/apache/samza/storage/kv/RocksDbOptionsHelper.java
index 252eb83..b7c47e1 100644
--- a/samza-kv-rocksdb/src/main/java/org/apache/samza/storage/kv/RocksDbOptionsHelper.java
+++ b/samza-kv-rocksdb/src/main/java/org/apache/samza/storage/kv/RocksDbOptionsHelper.java
@@ -110,14 +110,14 @@
     options.setKeepLogFileNum(storeConfig.getLong(ROCKSDB_KEEP_LOG_FILE_NUM, 2));
     options.setDeleteObsoleteFilesPeriodMicros(storeConfig.getLong(ROCKSDB_DELETE_OBSOLETE_FILES_PERIOD_MICROS, 21600000000L));
     // The default for rocksdb is 18446744073709551615, which is larger than java Long.MAX_VALUE. Hence setting it only if it's passed.
-    if(storeConfig.containsKey(ROCKSDB_MAX_MANIFEST_FILE_SIZE)) {
-        options.setMaxManifestFileSize(storeConfig.getLong(ROCKSDB_MAX_MANIFEST_FILE_SIZE));
+    if (storeConfig.containsKey(ROCKSDB_MAX_MANIFEST_FILE_SIZE)) {
+      options.setMaxManifestFileSize(storeConfig.getLong(ROCKSDB_MAX_MANIFEST_FILE_SIZE));
     }
     // use prepareForBulk load only when i. the store is being requested in BulkLoad mode
     // and ii. the storeDirectory does not exist (fresh restore), because bulk load does not work seamlessly with
     // existing stores : https://github.com/facebook/rocksdb/issues/2734
     StorageManagerUtil storageManagerUtil = new StorageManagerUtil();
-    if(storeMode.equals(StorageEngineFactory.StoreMode.BulkLoad) && !storageManagerUtil.storeExists(storeDir)) {
+    if (storeMode.equals(StorageEngineFactory.StoreMode.BulkLoad) && !storageManagerUtil.storeExists(storeDir)) {
       log.info("Using prepareForBulkLoad for restore to " + storeDir);
       options.prepareForBulkLoad();
     }
diff --git a/samza-kv-rocksdb/src/main/java/org/apache/samza/storage/kv/RocksDbReadingTool.java b/samza-kv-rocksdb/src/main/java/org/apache/samza/storage/kv/RocksDbReadingTool.java
index d5f0f8b..1a922d5 100644
--- a/samza-kv-rocksdb/src/main/java/org/apache/samza/storage/kv/RocksDbReadingTool.java
+++ b/samza-kv-rocksdb/src/main/java/org/apache/samza/storage/kv/RocksDbReadingTool.java
@@ -49,21 +49,21 @@
       .withOptionalArg()
       .ofType(Long.class)
       .describedAs("long-key")
-      .withValuesSeparatedBy( ',' );
+      .withValuesSeparatedBy(',');
 
   private ArgumentAcceptingOptionSpec<String> stringKeyArgu = parser()
       .accepts("string-key", "a list of string keys. Sperated by ','.")
       .withOptionalArg()
       .ofType(String.class)
       .describedAs("string-key")
-      .withValuesSeparatedBy( ',' );
+      .withValuesSeparatedBy(',');
 
   private ArgumentAcceptingOptionSpec<Integer> integerKeyArgu = parser()
       .accepts("integer-key", "a list of integer keys. Sperated by ','.")
       .withOptionalArg()
       .ofType(Integer.class)
       .describedAs("integer-key")
-      .withValuesSeparatedBy( ',' );
+      .withValuesSeparatedBy(',');
 
   private String dbPath = "";
   private String dbName = "";
diff --git a/samza-kv-rocksdb/src/test/java/org/apache/samza/storage/kv/TestRocksDbKeyValueStoreJava.java b/samza-kv-rocksdb/src/test/java/org/apache/samza/storage/kv/TestRocksDbKeyValueStoreJava.java
index 96a26ff..aea2423 100644
--- a/samza-kv-rocksdb/src/test/java/org/apache/samza/storage/kv/TestRocksDbKeyValueStoreJava.java
+++ b/samza-kv-rocksdb/src/test/java/org/apache/samza/storage/kv/TestRocksDbKeyValueStoreJava.java
@@ -55,7 +55,7 @@
 
     ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
     String prefix = "prefix";
-    for(int i = 0; i < 100; i++) {
+    for (int i = 0; i < 100; i++) {
       store.put(genKey(outputStream, prefix, i), genValue());
     }
 
@@ -95,7 +95,7 @@
     ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
     String prefix = "this is the key prefix";
     Random r = new Random();
-    for(int i = 0; i < 100000; i++) {
+    for (int i = 0; i < 100000; i++) {
       store.put(genKey(outputStream, prefix, r.nextInt()), genValue());
     }
 
diff --git a/samza-kv/src/main/java/org/apache/samza/storage/kv/BaseKeyValueStorageEngineFactory.java b/samza-kv/src/main/java/org/apache/samza/storage/kv/BaseKeyValueStorageEngineFactory.java
new file mode 100644
index 0000000..a3fc178
--- /dev/null
+++ b/samza-kv/src/main/java/org/apache/samza/storage/kv/BaseKeyValueStorageEngineFactory.java
@@ -0,0 +1,262 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.samza.storage.kv;
+
+import java.io.File;
+import java.util.Optional;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.samza.SamzaException;
+import org.apache.samza.config.Config;
+import org.apache.samza.config.MetricsConfig;
+import org.apache.samza.config.StorageConfig;
+import org.apache.samza.context.ContainerContext;
+import org.apache.samza.context.JobContext;
+import org.apache.samza.metrics.MetricsRegistry;
+import org.apache.samza.serializers.Serde;
+import org.apache.samza.storage.StorageEngine;
+import org.apache.samza.storage.StorageEngineFactory;
+import org.apache.samza.storage.StoreProperties;
+import org.apache.samza.system.SystemStreamPartition;
+import org.apache.samza.task.MessageCollector;
+import org.apache.samza.util.HighResolutionClock;
+import org.apache.samza.util.ScalaJavaUtil;
+
+
+/**
+ * This encapsulates all the steps needed to create a key value storage engine.
+ * This is meant to be extended by the specific key value store factory implementations which will in turn override the
+ * getKVStore method to return a raw key-value store.
+ */
+public abstract class BaseKeyValueStorageEngineFactory<K, V> implements StorageEngineFactory<K, V> {
+  private static final String INMEMORY_KV_STORAGE_ENGINE_FACTORY =
+      "org.apache.samza.storage.kv.inmemory.InMemoryKeyValueStorageEngineFactory";
+  private static final String WRITE_BATCH_SIZE = "write.batch.size";
+  private static final int DEFAULT_WRITE_BATCH_SIZE = 500;
+  private static final String OBJECT_CACHE_SIZE = "object.cache.size";
+  private static final int DEFAULT_OBJECT_CACHE_SIZE = 1000;
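+
+  /*
+   * These keys are read from the per-store config subset built in getStorageEngine, so in a job config they would
+   * appear as, for example (the store name "my-store" is illustrative):
+   *   stores.my-store.write.batch.size=500
+   *   stores.my-store.object.cache.size=1000
+   * An object.cache.size of 0 disables caching; a non-zero cache size must be at least the batch size.
+   */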
+
+  /**
+   * Implement this to return a KeyValueStore instance for the given store name, which will be used as the underlying
+   * raw store.
+   *
+   * @param storeName Name of the store
+   * @param storeDir The directory of the store
+   * @param registry MetricsRegistry to which to publish store specific metrics.
+   * @param changeLogSystemStreamPartition Samza stream partition from which to receive the changelog.
+   * @param jobContext Information about the job in which the task is executing.
+   * @param containerContext Information about the container in which the task is executing.
+   * @return A raw KeyValueStore instance
+   */
+  protected abstract KeyValueStore<byte[], byte[]> getKVStore(String storeName,
+      File storeDir,
+      MetricsRegistry registry,
+      SystemStreamPartition changeLogSystemStreamPartition,
+      JobContext jobContext,
+      ContainerContext containerContext,
+      StoreMode storeMode);
+
+  /**
+   * Constructs a key-value StorageEngine and returns it to the caller
+   *
+   * @param storeName The name of the storage engine.
+   * @param storeDir The directory of the storage engine.
+   * @param keySerde The serializer to use for serializing keys when reading or writing to the store.
+   * @param msgSerde The serializer to use for serializing messages when reading or writing to the store.
+   * @param changelogCollector MessageCollector the storage engine uses to persist changes.
+   * @param registry MetricsRegistry to which to publish storage-engine specific metrics.
+   * @param changelogSSP Samza system stream partition from which to receive the changelog.
+   * @param containerContext Information about the container in which the task is executing.
+   **/
+  public StorageEngine getStorageEngine(String storeName,
+      File storeDir,
+      Serde<K> keySerde,
+      Serde<V> msgSerde,
+      MessageCollector changelogCollector,
+      MetricsRegistry registry,
+      SystemStreamPartition changelogSSP,
+      JobContext jobContext,
+      ContainerContext containerContext,
+      StoreMode storeMode) {
+    Config storageConfigSubset = jobContext.getConfig().subset("stores." + storeName + ".", true);
+    StorageConfig storageConfig = new StorageConfig(jobContext.getConfig());
+    Optional<String> storeFactory = storageConfig.getStorageFactoryClassName(storeName);
+    StoreProperties.StorePropertiesBuilder storePropertiesBuilder = new StoreProperties.StorePropertiesBuilder();
+    if (!storeFactory.isPresent() || StringUtils.isBlank(storeFactory.get())) {
+      throw new SamzaException(
+          String.format("Store factory not defined for store %s. Cannot proceed with KV store creation!", storeName));
+    }
+    if (!storeFactory.get().equals(INMEMORY_KV_STORAGE_ENGINE_FACTORY)) {
+      storePropertiesBuilder.setPersistedToDisk(true);
+    }
+    int batchSize = storageConfigSubset.getInt(WRITE_BATCH_SIZE, DEFAULT_WRITE_BATCH_SIZE);
+    int cacheSize = storageConfigSubset.getInt(OBJECT_CACHE_SIZE, Math.max(batchSize, DEFAULT_OBJECT_CACHE_SIZE));
+    if (cacheSize > 0 && cacheSize < batchSize) {
+      throw new SamzaException(
+          String.format("cache.size for store %s cannot be less than batch.size as batched values reside in cache.",
+              storeName));
+    }
+    if (keySerde == null) {
+      throw new SamzaException(
+          String.format("Must define a key serde when using key value storage for store %s.", storeName));
+    }
+    if (msgSerde == null) {
+      throw new SamzaException(
+          String.format("Must define a message serde when using key value storage for store %s.", storeName));
+    }
+
+    KeyValueStore<byte[], byte[]> rawStore =
+        getKVStore(storeName, storeDir, registry, changelogSSP, jobContext, containerContext, storeMode);
+    KeyValueStore<byte[], byte[]> maybeLoggedStore = buildMaybeLoggedStore(changelogSSP,
+        storeName, registry, storePropertiesBuilder, rawStore, changelogCollector);
+    // this also applies serialization and caching layers
+    KeyValueStore<K, V> toBeAccessLoggedStore = buildStoreWithLargeMessageHandling(storeName, registry,
+        maybeLoggedStore, storageConfig, cacheSize, batchSize, keySerde, msgSerde);
+    KeyValueStore<K, V> maybeAccessLoggedStore =
+        buildMaybeAccessLoggedStore(storeName, toBeAccessLoggedStore, changelogCollector, changelogSSP, storageConfig,
+            keySerde);
+    KeyValueStore<K, V> nullSafeStore = new NullSafeKeyValueStore<>(maybeAccessLoggedStore);
+
+    KeyValueStorageEngineMetrics keyValueStorageEngineMetrics = new KeyValueStorageEngineMetrics(storeName, registry);
+    HighResolutionClock clock = buildClock(jobContext.getConfig());
+    return new KeyValueStorageEngine<>(storeName, storeDir, storePropertiesBuilder.build(), nullSafeStore, rawStore,
+        changelogSSP, changelogCollector, keyValueStorageEngineMetrics, batchSize,
+        ScalaJavaUtil.toScalaFunction(clock::nanoTime));
+  }
+
+  /**
+   * Wraps {@code storeToWrap} into a {@link LoggedStore} if {@code changelogSSP} is defined.
+   * Otherwise, returns the original {@code storeToWrap}.
+   */
+  private static KeyValueStore<byte[], byte[]> buildMaybeLoggedStore(SystemStreamPartition changelogSSP,
+      String storeName,
+      MetricsRegistry registry,
+      StoreProperties.StorePropertiesBuilder storePropertiesBuilder,
+      KeyValueStore<byte[], byte[]> storeToWrap,
+      MessageCollector changelogCollector) {
+    if (changelogSSP == null) {
+      return storeToWrap;
+    } else {
+      LoggedStoreMetrics loggedStoreMetrics = new LoggedStoreMetrics(storeName, registry);
+      storePropertiesBuilder.setLoggedStore(true);
+      return new LoggedStore<>(storeToWrap, changelogSSP, changelogCollector, loggedStoreMetrics);
+    }
+  }
+
+  /**
+   * Wraps {@code storeToWrap} with the proper layers to handle large messages.
+   * If "disallow.large.messages" is enabled, then the message will be serialized and the size will be checked before
+   * storing the serialized message in the cache.
+   * If "disallow.large.messages" is disabled, then the deserialized message will be stored in the cache. If
+   * "drop.large.messages" is enabled, then large messages will not be sent to the logged store.
+   */
+  private static <T, U> KeyValueStore<T, U> buildStoreWithLargeMessageHandling(String storeName,
+      MetricsRegistry registry,
+      KeyValueStore<byte[], byte[]> storeToWrap,
+      StorageConfig storageConfig,
+      int cacheSize,
+      int batchSize,
+      Serde<T> keySerde,
+      Serde<U> msgSerde) {
+    int maxMessageSize = storageConfig.getChangelogMaxMsgSizeBytes(storeName);
+    if (storageConfig.getDisallowLargeMessages(storeName)) {
+      /*
+       * The store wrapping ordering is done this way so that a large message cannot end up in the cache. However, it
+       * also means that serialized data is in the cache, so performance will be worse since the data needs to be
+       * deserialized even when cached.
+       */
+      KeyValueStore<byte[], byte[]> maybeCachedStore =
+          buildMaybeCachedStore(storeName, registry, storeToWrap, cacheSize, batchSize);
+      // this will throw a RecordTooLargeException when a large message is encountered
+      LargeMessageSafeStore largeMessageSafeKeyValueStore =
+          new LargeMessageSafeStore(maybeCachedStore, storeName, false, maxMessageSize);
+      return buildSerializedStore(storeName, registry, largeMessageSafeKeyValueStore, keySerde, msgSerde);
+    } else {
+      KeyValueStore<byte[], byte[]> toBeSerializedStore;
+      if (storageConfig.getDropLargeMessages(storeName)) {
+        toBeSerializedStore = new LargeMessageSafeStore(storeToWrap, storeName, true, maxMessageSize);
+      } else {
+        toBeSerializedStore = storeToWrap;
+      }
+      KeyValueStore<T, U> serializedStore =
+          buildSerializedStore(storeName, registry, toBeSerializedStore, keySerde, msgSerde);
+      /*
+       * Allows deserialized entries to be stored in the cache, but it means that a large message may end up in the
+       * cache even though it was not persisted to the logged store.
+       */
+      return buildMaybeCachedStore(storeName, registry, serializedStore, cacheSize, batchSize);
+    }
+  }
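+
+  /*
+   * Illustrative per-store configuration for the two large-message modes handled above ("my-store" is an example
+   * store name; the keys correspond to StorageConfig#getDisallowLargeMessages and StorageConfig#getDropLargeMessages):
+   *   stores.my-store.disallow.large.messages=true  -> serialize before caching and fail on oversized messages
+   *   stores.my-store.drop.large.messages=true      -> cache deserialized entries and skip writing oversized messages
+   * If both are enabled, disallow.large.messages takes precedence since it is checked first.
+   */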
+
+  /**
+   * Wraps {@code storeToWrap} with a {@link CachedStore} if caching is enabled.
+   * Otherwise, returns the {@code storeToWrap}.
+   */
+  private static <T, U> KeyValueStore<T, U> buildMaybeCachedStore(String storeName, MetricsRegistry registry,
+      KeyValueStore<T, U> storeToWrap, int cacheSize, int batchSize) {
+    if (cacheSize > 0) {
+      CachedStoreMetrics cachedStoreMetrics = new CachedStoreMetrics(storeName, registry);
+      return new CachedStore<>(storeToWrap, cacheSize, batchSize, cachedStoreMetrics);
+    } else {
+      return storeToWrap;
+    }
+  }
+
+  /**
+   * Wraps {@code storeToWrap} with a {@link SerializedKeyValueStore}.
+   */
+  private static <T, U> KeyValueStore<T, U> buildSerializedStore(String storeName,
+      MetricsRegistry registry,
+      KeyValueStore<byte[], byte[]> storeToWrap,
+      Serde<T> keySerde,
+      Serde<U> msgSerde) {
+    SerializedKeyValueStoreMetrics serializedMetrics = new SerializedKeyValueStoreMetrics(storeName, registry);
+    return new SerializedKeyValueStore<>(storeToWrap, keySerde, msgSerde, serializedMetrics);
+  }
+
+  /**
+   * Wraps {@code storeToWrap} with an {@link AccessLoggedStore} if enabled.
+   * Otherwise, returns the {@code storeToWrap}.
+   */
+  private static <T, U> KeyValueStore<T, U> buildMaybeAccessLoggedStore(String storeName,
+      KeyValueStore<T, U> storeToWrap,
+      MessageCollector changelogCollector,
+      SystemStreamPartition changelogSSP,
+      StorageConfig storageConfig,
+      Serde<T> keySerde) {
+    if (storageConfig.getAccessLogEnabled(storeName)) {
+      return new AccessLoggedStore<>(storeToWrap, changelogCollector, changelogSSP, storageConfig, storeName, keySerde);
+    } else {
+      return storeToWrap;
+    }
+  }
+
+  /**
+   * If "metrics.timer.enabled" is enabled, then returns a {@link HighResolutionClock} that uses
+   * {@link System#nanoTime}.
+   * Otherwise, returns a clock which always returns 0.
+   */
+  private static HighResolutionClock buildClock(Config config) {
+    MetricsConfig metricsConfig = new MetricsConfig(config);
+    if (metricsConfig.getMetricsTimerEnabled()) {
+      return System::nanoTime;
+    } else {
+      return () -> 0;
+    }
+  }
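+
+  /*
+   * For example, with metrics.timer.enabled=true the returned clock delegates to System.nanoTime() and per-operation
+   * store timings are recorded; when it is disabled the clock always returns 0, effectively turning those timers off.
+   */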
+}
diff --git a/samza-kv/src/main/java/org/apache/samza/storage/kv/LargeMessageSafeStore.java b/samza-kv/src/main/java/org/apache/samza/storage/kv/LargeMessageSafeStore.java
index 177a986..383ae42 100644
--- a/samza-kv/src/main/java/org/apache/samza/storage/kv/LargeMessageSafeStore.java
+++ b/samza-kv/src/main/java/org/apache/samza/storage/kv/LargeMessageSafeStore.java
@@ -22,6 +22,7 @@
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Optional;
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.samza.checkpoint.CheckpointId;
 import org.apache.samza.metrics.MetricsRegistryMap;
 import org.slf4j.Logger;
@@ -85,8 +86,8 @@
   @Override
   public void putAll(List<Entry<byte[], byte[]>> entries) {
     entries.forEach(entry -> {
-        validateMessageSize(entry.getValue());
-      });
+      validateMessageSize(entry.getValue());
+    });
     List<Entry<byte[], byte[]>> largeMessageSafeEntries = removeLargeMessages(entries);
     store.putAll(largeMessageSafeEntries);
   }
@@ -145,14 +146,19 @@
   private List<Entry<byte[], byte[]>> removeLargeMessages(List<Entry<byte[], byte[]>> entries) {
     List<Entry<byte[], byte[]>> largeMessageSafeEntries = new ArrayList<>();
     entries.forEach(entry -> {
-        if (!isLargeMessage(entry.getValue())) {
-          largeMessageSafeEntries.add(entry);
-        } else {
-          LOG.info("Ignoring a large message with size " + entry.getValue().length + " since it is greater than "
-              + "the maximum allowed value of " + maxMessageSize);
-          largeMessageSafeStoreMetrics.ignoredLargeMessages().inc();
-        }
-      });
+      if (!isLargeMessage(entry.getValue())) {
+        largeMessageSafeEntries.add(entry);
+      } else {
+        LOG.info("Ignoring a large message with size " + entry.getValue().length + " since it is greater than "
+            + "the maximum allowed value of " + maxMessageSize);
+        largeMessageSafeStoreMetrics.ignoredLargeMessages().inc();
+      }
+    });
     return largeMessageSafeEntries;
   }
+
+  @VisibleForTesting
+  KeyValueStore<byte[], byte[]> getStore() {
+    return this.store;
+  }
 }
diff --git a/samza-kv/src/main/java/org/apache/samza/storage/kv/LocalTable.java b/samza-kv/src/main/java/org/apache/samza/storage/kv/LocalTable.java
index 89c2794..c94b3f1 100644
--- a/samza-kv/src/main/java/org/apache/samza/storage/kv/LocalTable.java
+++ b/samza-kv/src/main/java/org/apache/samza/storage/kv/LocalTable.java
@@ -118,12 +118,12 @@
     List<Entry<K, V>> toPut = new LinkedList<>();
     List<K> toDelete = new LinkedList<>();
     entries.forEach(e -> {
-        if (e.getValue() != null) {
-          toPut.add(e);
-        } else {
-          toDelete.add(e.getKey());
-        }
-      });
+      if (e.getValue() != null) {
+        toPut.add(e);
+      } else {
+        toDelete.add(e.getKey());
+      }
+    });
 
     if (!toPut.isEmpty()) {
       instrument(metrics.numPutAlls, metrics.putAllNs, () -> kvStore.putAll(toPut));
diff --git a/samza-kv/src/main/scala/org/apache/samza/storage/kv/RecordTooLargeException.java b/samza-kv/src/main/java/org/apache/samza/storage/kv/RecordTooLargeException.java
similarity index 100%
rename from samza-kv/src/main/scala/org/apache/samza/storage/kv/RecordTooLargeException.java
rename to samza-kv/src/main/java/org/apache/samza/storage/kv/RecordTooLargeException.java
diff --git a/samza-kv/src/main/scala/org/apache/samza/storage/kv/AccessLoggedStore.scala b/samza-kv/src/main/scala/org/apache/samza/storage/kv/AccessLoggedStore.scala
index 8c32793..a9db082 100644
--- a/samza-kv/src/main/scala/org/apache/samza/storage/kv/AccessLoggedStore.scala
+++ b/samza-kv/src/main/scala/org/apache/samza/storage/kv/AccessLoggedStore.scala
@@ -24,6 +24,7 @@
 import java.util
 import java.util.Optional
 
+import com.google.common.annotations.VisibleForTesting
 import org.apache.samza.checkpoint.CheckpointId
 import org.apache.samza.config.StorageConfig
 import org.apache.samza.task.MessageCollector
@@ -167,4 +168,9 @@
   override def checkpoint(id: CheckpointId): Optional[Path] = {
     store.checkpoint(id)
   }
+
+  @VisibleForTesting
+  private[kv] def getStore: KeyValueStore[K, V] = {
+    store
+  }
 }
diff --git a/samza-kv/src/main/scala/org/apache/samza/storage/kv/BaseKeyValueStorageEngineFactory.scala b/samza-kv/src/main/scala/org/apache/samza/storage/kv/BaseKeyValueStorageEngineFactory.scala
deleted file mode 100644
index fef1deb..0000000
--- a/samza-kv/src/main/scala/org/apache/samza/storage/kv/BaseKeyValueStorageEngineFactory.scala
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.samza.storage.kv
-
-import java.io.File
-
-import org.apache.samza.SamzaException
-import org.apache.samza.config.{MetricsConfig, StorageConfig}
-import org.apache.samza.context.{ContainerContext, JobContext}
-import org.apache.samza.metrics.MetricsRegistry
-import org.apache.samza.serializers.Serde
-import org.apache.samza.storage.StorageEngineFactory.StoreMode
-import org.apache.samza.storage.{StorageEngine, StorageEngineFactory, StoreProperties}
-import org.apache.samza.system.SystemStreamPartition
-import org.apache.samza.task.MessageCollector
-import org.apache.samza.util.ScalaJavaUtil.JavaOptionals
-import org.apache.samza.util.{HighResolutionClock, Logging}
-
-/**
-  * A key value storage engine factory implementation
-  *
-  * This trait encapsulates all the steps needed to create a key value storage engine. It is meant to be extended
-  * by the specific key value store factory implementations which will in turn override the getKVStore method.
-  */
-trait BaseKeyValueStorageEngineFactory[K, V] extends StorageEngineFactory[K, V] {
-
-  private val INMEMORY_KV_STORAGE_ENGINE_FACTORY =
-    "org.apache.samza.storage.kv.inmemory.InMemoryKeyValueStorageEngineFactory"
-
-  /**
-   * Return a KeyValueStore instance for the given store name,
-   * which will be used as the underlying raw store
-   *
-   * @param storeName Name of the store
-   * @param storeDir The directory of the store
-   * @param registry MetricsRegistry to which to publish store specific metrics.
-   * @param changeLogSystemStreamPartition Samza stream partition from which to receive the changelog.
-   * @param containerContext Information about the container in which the task is executing.
-   * @return A valid KeyValueStore instance
-   */
-  def getKVStore(storeName: String,
-    storeDir: File,
-    registry: MetricsRegistry,
-    changeLogSystemStreamPartition: SystemStreamPartition,
-    jobContext: JobContext,
-    containerContext: ContainerContext, storeMode: StoreMode): KeyValueStore[Array[Byte], Array[Byte]]
-
-  /**
-   * Constructs a key-value StorageEngine and returns it to the caller
-   *
-   * @param storeName The name of the storage engine.
-   * @param storeDir The directory of the storage engine.
-   * @param keySerde The serializer to use for serializing keys when reading or writing to the store.
-   * @param msgSerde The serializer to use for serializing messages when reading or writing to the store.
-   * @param changelogCollector MessageCollector the storage engine uses to persist changes.
-   * @param registry MetricsRegistry to which to publish storage-engine specific metrics.
-   * @param changelogSSP Samza system stream partition from which to receive the changelog.
-   * @param containerContext Information about the container in which the task is executing.
-   **/
-  def getStorageEngine(storeName: String,
-    storeDir: File,
-    keySerde: Serde[K],
-    msgSerde: Serde[V],
-    changelogCollector: MessageCollector,
-    registry: MetricsRegistry,
-    changelogSSP: SystemStreamPartition,
-    jobContext: JobContext,
-    containerContext: ContainerContext, storeMode : StoreMode): StorageEngine = {
-    val storageConfigSubset = jobContext.getConfig.subset("stores." + storeName + ".", true)
-    val storageConfig = new StorageConfig(jobContext.getConfig)
-    val storeFactory = JavaOptionals.toRichOptional(storageConfig.getStorageFactoryClassName(storeName)).toOption
-    var storePropertiesBuilder = new StoreProperties.StorePropertiesBuilder()
-    val accessLog = storageConfig.getAccessLogEnabled(storeName)
-
-    var maxMessageSize = storageConfig.getChangelogMaxMsgSizeBytes(storeName)
-    val disallowLargeMessages = storageConfig.getDisallowLargeMessages(storeName)
-    val dropLargeMessage = storageConfig.getDropLargeMessages(storeName)
-
-    if (storeFactory.isEmpty) {
-      throw new SamzaException("Store factory not defined. Cannot proceed with KV store creation!")
-    }
-    if (!storeFactory.get.equals(INMEMORY_KV_STORAGE_ENGINE_FACTORY)) {
-      storePropertiesBuilder = storePropertiesBuilder.setPersistedToDisk(true)
-    }
-
-    val batchSize = storageConfigSubset.getInt("write.batch.size", 500)
-    val cacheSize = storageConfigSubset.getInt("object.cache.size", math.max(batchSize, 1000))
-    val enableCache = cacheSize > 0
-
-    if (cacheSize > 0 && cacheSize < batchSize) {
-      throw new SamzaException("A store's cache.size cannot be less than batch.size as batched values reside in cache.")
-    }
-
-    if (keySerde == null) {
-      throw new SamzaException("Must define a key serde when using key value storage.")
-    }
-
-    if (msgSerde == null) {
-      throw new SamzaException("Must define a message serde when using key value storage.")
-    }
-
-    val rawStore =
-      getKVStore(storeName, storeDir, registry, changelogSSP, jobContext, containerContext, storeMode)
-
-    // maybe wrap with logging
-    val maybeLoggedStore = if (changelogSSP == null) {
-      rawStore
-    } else {
-      val loggedStoreMetrics = new LoggedStoreMetrics(storeName, registry)
-      storePropertiesBuilder = storePropertiesBuilder.setLoggedStore(true)
-      new LoggedStore(rawStore, changelogSSP, changelogCollector, loggedStoreMetrics)
-    }
-
-    var toBeAccessLoggedStore: KeyValueStore[K, V] = null
-
-    // If large messages are disallowed in config, then this creates a LargeMessageSafeKeyValueStore that throws a
-    // RecordTooLargeException when a large message is encountered.
-    if (disallowLargeMessages) {
-      // maybe wrap with caching
-      val maybeCachedStore = if (enableCache) {
-        createCachedStore(storeName, registry, maybeLoggedStore, cacheSize, batchSize)
-      } else {
-        maybeLoggedStore
-      }
-
-      // wrap with large message checking
-      val largeMessageSafeKeyValueStore = new LargeMessageSafeStore(maybeCachedStore, storeName, false, maxMessageSize)
-      // wrap with serialization
-      val serializedMetrics = new SerializedKeyValueStoreMetrics(storeName, registry)
-      toBeAccessLoggedStore = new SerializedKeyValueStore[K, V](largeMessageSafeKeyValueStore, keySerde, msgSerde, serializedMetrics)
-
-    }
-    else {
-      val toBeSerializedStore = if (dropLargeMessage) {
-        // wrap with large message checking
-        new LargeMessageSafeStore(maybeLoggedStore, storeName, dropLargeMessage, maxMessageSize)
-      } else {
-        maybeLoggedStore
-      }
-      // wrap with serialization
-      val serializedMetrics = new SerializedKeyValueStoreMetrics(storeName, registry)
-      val serializedStore = new SerializedKeyValueStore[K, V](toBeSerializedStore, keySerde, msgSerde, serializedMetrics)
-      // maybe wrap with caching
-      toBeAccessLoggedStore = if (enableCache) {
-        createCachedStore(storeName, registry, serializedStore, cacheSize, batchSize)
-      } else {
-        serializedStore
-      }
-    }
-
-    val maybeAccessLoggedStore = if (accessLog) {
-      new AccessLoggedStore(toBeAccessLoggedStore, changelogCollector, changelogSSP, storageConfig, storeName, keySerde)
-    } else {
-      toBeAccessLoggedStore
-    }
-
-    // wrap with null value checking
-    val nullSafeStore = new NullSafeKeyValueStore(maybeAccessLoggedStore)
-
-    // create the storage engine and return
-    val keyValueStorageEngineMetrics = new KeyValueStorageEngineMetrics(storeName, registry)
-    val metricsConfig = new MetricsConfig(jobContext.getConfig)
-    val clock = if (metricsConfig.getMetricsTimerEnabled) {
-      new HighResolutionClock {
-        override def nanoTime(): Long = System.nanoTime()
-      }
-    } else {
-      new HighResolutionClock {
-        override def nanoTime(): Long = 0L
-      }
-    }
-
-    new KeyValueStorageEngine(storeName, storeDir, storePropertiesBuilder.build(), nullSafeStore, rawStore,
-      changelogSSP, changelogCollector, keyValueStorageEngineMetrics, batchSize, () => clock.nanoTime())
-  }
-
-  def createCachedStore[K, V](storeName: String, registry: MetricsRegistry,
-    underlyingStore: KeyValueStore[K, V], cacheSize: Int, batchSize: Int): KeyValueStore[K, V] = {
-    // wrap with caching
-    val cachedStoreMetrics = new CachedStoreMetrics(storeName, registry)
-    new CachedStore(underlyingStore, cacheSize, batchSize, cachedStoreMetrics)
-  }
-}
diff --git a/samza-kv/src/main/scala/org/apache/samza/storage/kv/CachedStore.scala b/samza-kv/src/main/scala/org/apache/samza/storage/kv/CachedStore.scala
index 5c1961c..6dbc21f 100644
--- a/samza-kv/src/main/scala/org/apache/samza/storage/kv/CachedStore.scala
+++ b/samza-kv/src/main/scala/org/apache/samza/storage/kv/CachedStore.scala
@@ -25,6 +25,7 @@
 import java.nio.file.Path
 import java.util.{Arrays, Optional}
 
+import com.google.common.annotations.VisibleForTesting
 import org.apache.samza.checkpoint.CheckpointId
 
 /**
@@ -299,6 +300,11 @@
   override def checkpoint(id: CheckpointId): Optional[Path] = {
     store.checkpoint(id)
   }
+
+  @VisibleForTesting
+  private[kv] def getStore: KeyValueStore[K, V] = {
+    store
+  }
 }
 
 private case class CacheEntry[K, V](var value: V, var dirty: mutable.DoubleLinkedList[K])
diff --git a/samza-kv/src/main/scala/org/apache/samza/storage/kv/KeyValueStorageEngine.scala b/samza-kv/src/main/scala/org/apache/samza/storage/kv/KeyValueStorageEngine.scala
index afba824..a9c0c09 100644
--- a/samza-kv/src/main/scala/org/apache/samza/storage/kv/KeyValueStorageEngine.scala
+++ b/samza-kv/src/main/scala/org/apache/samza/storage/kv/KeyValueStorageEngine.scala
@@ -29,6 +29,7 @@
 import java.nio.file.Path
 import java.util.Optional
 
+import com.google.common.annotations.VisibleForTesting
 import org.apache.samza.checkpoint.CheckpointId
 
 /**
@@ -196,7 +197,7 @@
       }
       lastBatchFlushed = true
     }
-    info(restoredMessages + " entries trimmed for store: " + storeName + " in directory: " + storeDir.toString + ".")
+    info(trimmedMessages + " entries trimmed for store: " + storeName + " in directory: " + storeDir.toString + ".")
 
     // flush the store and the changelog producer
     flush() // TODO HIGH pmaheshw SAMZA-2338: Need a way to flush changelog producers. This only flushes the stores.
@@ -252,4 +253,14 @@
       wrapperStore.snapshot(from, to)
     }
   }
+
+  @VisibleForTesting
+  private[kv] def getRawStore: KeyValueStore[Array[Byte], Array[Byte]] = {
+    rawStore
+  }
+
+  @VisibleForTesting
+  private[kv] def getWrapperStore: KeyValueStore[K, V] = {
+    wrapperStore
+  }
 }
diff --git a/samza-kv/src/main/scala/org/apache/samza/storage/kv/LoggedStore.scala b/samza-kv/src/main/scala/org/apache/samza/storage/kv/LoggedStore.scala
index 320e801..2db754a 100644
--- a/samza-kv/src/main/scala/org/apache/samza/storage/kv/LoggedStore.scala
+++ b/samza-kv/src/main/scala/org/apache/samza/storage/kv/LoggedStore.scala
@@ -22,6 +22,7 @@
 import java.nio.file.Path
 import java.util.Optional
 
+import com.google.common.annotations.VisibleForTesting
 import org.apache.samza.checkpoint.CheckpointId
 import org.apache.samza.util.Logging
 import org.apache.samza.system.{OutgoingMessageEnvelope, SystemStreamPartition}
@@ -125,4 +126,9 @@
   override def checkpoint(id: CheckpointId): Optional[Path] = {
     store.checkpoint(id)
   }
+
+  @VisibleForTesting
+  private[kv] def getStore: KeyValueStore[K, V] = {
+    store
+  }
 }
\ No newline at end of file
diff --git a/samza-kv/src/main/scala/org/apache/samza/storage/kv/NullSafeKeyValueStore.scala b/samza-kv/src/main/scala/org/apache/samza/storage/kv/NullSafeKeyValueStore.scala
index 8bb6fa2..feca04b 100644
--- a/samza-kv/src/main/scala/org/apache/samza/storage/kv/NullSafeKeyValueStore.scala
+++ b/samza-kv/src/main/scala/org/apache/samza/storage/kv/NullSafeKeyValueStore.scala
@@ -22,6 +22,7 @@
 import java.nio.file.Path
 import java.util.Optional
 
+import com.google.common.annotations.VisibleForTesting
 import org.apache.samza.checkpoint.CheckpointId
 
 import scala.collection.JavaConverters._
@@ -104,4 +105,9 @@
   override def checkpoint(id: CheckpointId): Optional[Path] = {
     store.checkpoint(id)
   }
+
+  @VisibleForTesting
+  private[kv] def getStore: KeyValueStore[K, V] = {
+    store
+  }
 }
diff --git a/samza-kv/src/main/scala/org/apache/samza/storage/kv/SerializedKeyValueStore.scala b/samza-kv/src/main/scala/org/apache/samza/storage/kv/SerializedKeyValueStore.scala
index 96566ac..b78e14a 100644
--- a/samza-kv/src/main/scala/org/apache/samza/storage/kv/SerializedKeyValueStore.scala
+++ b/samza-kv/src/main/scala/org/apache/samza/storage/kv/SerializedKeyValueStore.scala
@@ -22,6 +22,7 @@
 import java.nio.file.Path
 import java.util.Optional
 
+import com.google.common.annotations.VisibleForTesting
 import org.apache.samza.checkpoint.CheckpointId
 import org.apache.samza.util.Logging
 import org.apache.samza.serializers._
@@ -202,4 +203,9 @@
   override def checkpoint(id: CheckpointId): Optional[Path] = {
     store.checkpoint(id)
   }
+
+  @VisibleForTesting
+  private[kv] def getStore: KeyValueStore[Array[Byte], Array[Byte]] = {
+    store
+  }
 }
diff --git a/samza-kv/src/test/java/org/apache/samza/storage/kv/MockKeyValueStorageEngineFactory.java b/samza-kv/src/test/java/org/apache/samza/storage/kv/MockKeyValueStorageEngineFactory.java
new file mode 100644
index 0000000..3430ae9
--- /dev/null
+++ b/samza-kv/src/test/java/org/apache/samza/storage/kv/MockKeyValueStorageEngineFactory.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.samza.storage.kv;
+
+import java.io.File;
+import org.apache.samza.context.ContainerContext;
+import org.apache.samza.context.JobContext;
+import org.apache.samza.metrics.MetricsRegistry;
+import org.apache.samza.system.SystemStreamPartition;
+
+
+/**
+ * Used for testing {@link BaseKeyValueStorageEngineFactory}.
+ * Implements {@link #getKVStore} to return a pre-built {@link KeyValueStore}.
+ */
+public class MockKeyValueStorageEngineFactory extends BaseKeyValueStorageEngineFactory<String, String> {
+  private final KeyValueStore<byte[], byte[]> rawKeyValueStore;
+
+  public MockKeyValueStorageEngineFactory(KeyValueStore<byte[], byte[]> rawKeyValueStore) {
+    this.rawKeyValueStore = rawKeyValueStore;
+  }
+
+  @Override
+  protected KeyValueStore<byte[], byte[]> getKVStore(String storeName, File storeDir, MetricsRegistry registry,
+      SystemStreamPartition changeLogSystemStreamPartition, JobContext jobContext, ContainerContext containerContext,
+      StoreMode storeMode) {
+    return this.rawKeyValueStore;
+  }
+}
diff --git a/samza-kv/src/test/java/org/apache/samza/storage/kv/TestBaseKeyValueStorageEngineFactory.java b/samza-kv/src/test/java/org/apache/samza/storage/kv/TestBaseKeyValueStorageEngineFactory.java
new file mode 100644
index 0000000..22a1b57
--- /dev/null
+++ b/samza-kv/src/test/java/org/apache/samza/storage/kv/TestBaseKeyValueStorageEngineFactory.java
@@ -0,0 +1,295 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.samza.storage.kv;
+
+import java.io.File;
+import java.util.Map;
+import com.google.common.collect.ImmutableMap;
+import org.apache.samza.Partition;
+import org.apache.samza.SamzaException;
+import org.apache.samza.config.Config;
+import org.apache.samza.config.MapConfig;
+import org.apache.samza.config.StorageConfig;
+import org.apache.samza.context.ContainerContext;
+import org.apache.samza.context.JobContext;
+import org.apache.samza.metrics.Gauge;
+import org.apache.samza.metrics.MetricsRegistry;
+import org.apache.samza.serializers.Serde;
+import org.apache.samza.storage.StorageEngine;
+import org.apache.samza.storage.StorageEngineFactory;
+import org.apache.samza.storage.StoreProperties;
+import org.apache.samza.system.SystemStreamPartition;
+import org.apache.samza.task.MessageCollector;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+
+public class TestBaseKeyValueStorageEngineFactory {
+  private static final String STORE_NAME = "myStore";
+  private static final StorageEngineFactory.StoreMode STORE_MODE = StorageEngineFactory.StoreMode.ReadWrite;
+  private static final SystemStreamPartition CHANGELOG_SSP =
+      new SystemStreamPartition("system", "stream", new Partition(0));
+  private static final Map<String, String> BASE_CONFIG =
+      ImmutableMap.of(String.format(StorageConfig.FACTORY, STORE_NAME),
+          MockKeyValueStorageEngineFactory.class.getName());
+  private static final Map<String, String> DISABLE_CACHE =
+      ImmutableMap.of(String.format("stores.%s.object.cache.size", STORE_NAME), "0");
+  private static final Map<String, String> DISALLOW_LARGE_MESSAGES =
+      ImmutableMap.of(String.format(StorageConfig.DISALLOW_LARGE_MESSAGES, STORE_NAME), "true");
+  private static final Map<String, String> DROP_LARGE_MESSAGES =
+      ImmutableMap.of(String.format(StorageConfig.DROP_LARGE_MESSAGES, STORE_NAME), "true");
+  private static final Map<String, String> ACCESS_LOG_ENABLED =
+      ImmutableMap.of(String.format("stores.%s.accesslog.enabled", STORE_NAME), "true");
+
+  @Mock
+  private File storeDir;
+  @Mock
+  private Serde<String> keySerde;
+  @Mock
+  private Serde<String> msgSerde;
+  @Mock
+  private MessageCollector changelogCollector;
+  @Mock
+  private MetricsRegistry metricsRegistry;
+  @Mock
+  private JobContext jobContext;
+  @Mock
+  private ContainerContext containerContext;
+  @Mock
+  private KeyValueStore<byte[], byte[]> rawKeyValueStore;
+
+  @Before
+  public void setup() {
+    MockitoAnnotations.initMocks(this);
+    // some metrics objects need this for histogram metric instantiation
+    when(this.metricsRegistry.newGauge(any(), any())).thenReturn(mock(Gauge.class));
+  }
+
+  @Test(expected = SamzaException.class)
+  public void testMissingStoreFactory() {
+    Config config = new MapConfig();
+    callGetStorageEngine(config, null);
+  }
+
+  @Test(expected = SamzaException.class)
+  public void testInvalidCacheSize() {
+    Config config = new MapConfig(BASE_CONFIG,
+        ImmutableMap.of(String.format("stores.%s.write.cache.batch", STORE_NAME), "100",
+            String.format("stores.%s.object.cache.size", STORE_NAME), "50"));
+    callGetStorageEngine(config, null);
+  }
+
+  @Test(expected = SamzaException.class)
+  public void testMissingKeySerde() {
+    Config config = new MapConfig(BASE_CONFIG);
+    when(this.jobContext.getConfig()).thenReturn(config);
+    new MockKeyValueStorageEngineFactory(this.rawKeyValueStore).getStorageEngine(STORE_NAME, this.storeDir, null,
+        this.msgSerde, this.changelogCollector, this.metricsRegistry, null, this.jobContext, this.containerContext,
+        STORE_MODE);
+  }
+
+  @Test(expected = SamzaException.class)
+  public void testMissingValueSerde() {
+    Config config = new MapConfig(BASE_CONFIG);
+    when(this.jobContext.getConfig()).thenReturn(config);
+    new MockKeyValueStorageEngineFactory(this.rawKeyValueStore).getStorageEngine(STORE_NAME, this.storeDir,
+        this.keySerde, null, this.changelogCollector, this.metricsRegistry, null, this.jobContext,
+        this.containerContext, STORE_MODE);
+  }
+
+  @Test
+  public void testInMemoryKeyValueStore() {
+    Config config = new MapConfig(DISABLE_CACHE, ImmutableMap.of(String.format(StorageConfig.FACTORY, STORE_NAME),
+        "org.apache.samza.storage.kv.inmemory.InMemoryKeyValueStorageEngineFactory"));
+    StorageEngine storageEngine = callGetStorageEngine(config, null);
+    KeyValueStorageEngine<?, ?> keyValueStorageEngine = baseStorageEngineValidation(storageEngine);
+    assertStoreProperties(keyValueStorageEngine.getStoreProperties(), false, false);
+    NullSafeKeyValueStore<?, ?> nullSafeKeyValueStore =
+        assertAndCast(keyValueStorageEngine.getWrapperStore(), NullSafeKeyValueStore.class);
+    SerializedKeyValueStore<?, ?> serializedKeyValueStore =
+        assertAndCast(nullSafeKeyValueStore.getStore(), SerializedKeyValueStore.class);
+    // config specifies the in-memory key-value factory, but the mock test factory is still the one invoked, so the raw store is the test store
+    assertEquals(this.rawKeyValueStore, serializedKeyValueStore.getStore());
+  }
+
+  @Test
+  public void testRawStoreOnly() {
+    Config config = new MapConfig(BASE_CONFIG, DISABLE_CACHE);
+    StorageEngine storageEngine = callGetStorageEngine(config, null);
+    KeyValueStorageEngine<?, ?> keyValueStorageEngine = baseStorageEngineValidation(storageEngine);
+    assertStoreProperties(keyValueStorageEngine.getStoreProperties(), true, false);
+    NullSafeKeyValueStore<?, ?> nullSafeKeyValueStore =
+        assertAndCast(keyValueStorageEngine.getWrapperStore(), NullSafeKeyValueStore.class);
+    SerializedKeyValueStore<?, ?> serializedKeyValueStore =
+        assertAndCast(nullSafeKeyValueStore.getStore(), SerializedKeyValueStore.class);
+    assertEquals(this.rawKeyValueStore, serializedKeyValueStore.getStore());
+  }
+
+  @Test
+  public void testWithLoggedStore() {
+    Config config = new MapConfig(BASE_CONFIG, DISABLE_CACHE);
+    StorageEngine storageEngine = callGetStorageEngine(config, CHANGELOG_SSP);
+    KeyValueStorageEngine<?, ?> keyValueStorageEngine = baseStorageEngineValidation(storageEngine);
+    assertStoreProperties(keyValueStorageEngine.getStoreProperties(), true, true);
+    NullSafeKeyValueStore<?, ?> nullSafeKeyValueStore =
+        assertAndCast(keyValueStorageEngine.getWrapperStore(), NullSafeKeyValueStore.class);
+    SerializedKeyValueStore<?, ?> serializedKeyValueStore =
+        assertAndCast(nullSafeKeyValueStore.getStore(), SerializedKeyValueStore.class);
+    LoggedStore<?, ?> loggedStore = assertAndCast(serializedKeyValueStore.getStore(), LoggedStore.class);
+    // type generics don't match due to wildcard type, but checking reference equality, so type generics don't matter
+    // noinspection AssertEqualsBetweenInconvertibleTypes
+    assertEquals(this.rawKeyValueStore, loggedStore.getStore());
+  }
+
+  @Test
+  public void testWithLoggedStoreAndCachedStore() {
+    Config config = new MapConfig(BASE_CONFIG);
+    StorageEngine storageEngine = callGetStorageEngine(config, CHANGELOG_SSP);
+    KeyValueStorageEngine<?, ?> keyValueStorageEngine = baseStorageEngineValidation(storageEngine);
+    assertStoreProperties(keyValueStorageEngine.getStoreProperties(), true, true);
+    NullSafeKeyValueStore<?, ?> nullSafeKeyValueStore =
+        assertAndCast(keyValueStorageEngine.getWrapperStore(), NullSafeKeyValueStore.class);
+    CachedStore<?, ?> cachedStore = assertAndCast(nullSafeKeyValueStore.getStore(), CachedStore.class);
+    SerializedKeyValueStore<?, ?> serializedKeyValueStore =
+        assertAndCast(cachedStore.getStore(), SerializedKeyValueStore.class);
+    LoggedStore<?, ?> loggedStore = assertAndCast(serializedKeyValueStore.getStore(), LoggedStore.class);
+    // type generics don't match due to wildcard type, but checking reference equality, so type generics don't matter
+    // noinspection AssertEqualsBetweenInconvertibleTypes
+    assertEquals(this.rawKeyValueStore, loggedStore.getStore());
+  }
+
+  @Test
+  public void testDisallowLargeMessages() {
+    Config config = new MapConfig(BASE_CONFIG, DISABLE_CACHE, DISALLOW_LARGE_MESSAGES);
+    StorageEngine storageEngine = callGetStorageEngine(config, null);
+    KeyValueStorageEngine<?, ?> keyValueStorageEngine = baseStorageEngineValidation(storageEngine);
+    assertStoreProperties(keyValueStorageEngine.getStoreProperties(), true, false);
+    NullSafeKeyValueStore<?, ?> nullSafeKeyValueStore =
+        assertAndCast(keyValueStorageEngine.getWrapperStore(), NullSafeKeyValueStore.class);
+    SerializedKeyValueStore<?, ?> serializedKeyValueStore =
+        assertAndCast(nullSafeKeyValueStore.getStore(), SerializedKeyValueStore.class);
+    LargeMessageSafeStore largeMessageSafeStore =
+        assertAndCast(serializedKeyValueStore.getStore(), LargeMessageSafeStore.class);
+    assertEquals(this.rawKeyValueStore, largeMessageSafeStore.getStore());
+  }
+
+  @Test
+  public void testDisallowLargeMessagesWithCache() {
+    Config config = new MapConfig(BASE_CONFIG, DISALLOW_LARGE_MESSAGES);
+    StorageEngine storageEngine = callGetStorageEngine(config, null);
+    KeyValueStorageEngine<?, ?> keyValueStorageEngine = baseStorageEngineValidation(storageEngine);
+    assertStoreProperties(keyValueStorageEngine.getStoreProperties(), true, false);
+    NullSafeKeyValueStore<?, ?> nullSafeKeyValueStore =
+        assertAndCast(keyValueStorageEngine.getWrapperStore(), NullSafeKeyValueStore.class);
+    SerializedKeyValueStore<?, ?> serializedKeyValueStore =
+        assertAndCast(nullSafeKeyValueStore.getStore(), SerializedKeyValueStore.class);
+    LargeMessageSafeStore largeMessageSafeStore =
+        assertAndCast(serializedKeyValueStore.getStore(), LargeMessageSafeStore.class);
+    CachedStore<?, ?> cachedStore = assertAndCast(largeMessageSafeStore.getStore(), CachedStore.class);
+    // type generics don't match due to wildcard type, but checking reference equality, so type generics don't matter
+    // noinspection AssertEqualsBetweenInconvertibleTypes
+    assertEquals(this.rawKeyValueStore, cachedStore.getStore());
+  }
+
+  @Test
+  public void testDropLargeMessages() {
+    Config config = new MapConfig(BASE_CONFIG, DISABLE_CACHE, DROP_LARGE_MESSAGES);
+    StorageEngine storageEngine = callGetStorageEngine(config, null);
+    KeyValueStorageEngine<?, ?> keyValueStorageEngine = baseStorageEngineValidation(storageEngine);
+    assertStoreProperties(keyValueStorageEngine.getStoreProperties(), true, false);
+    NullSafeKeyValueStore<?, ?> nullSafeKeyValueStore =
+        assertAndCast(keyValueStorageEngine.getWrapperStore(), NullSafeKeyValueStore.class);
+    SerializedKeyValueStore<?, ?> serializedKeyValueStore =
+        assertAndCast(nullSafeKeyValueStore.getStore(), SerializedKeyValueStore.class);
+    LargeMessageSafeStore largeMessageSafeStore =
+        assertAndCast(serializedKeyValueStore.getStore(), LargeMessageSafeStore.class);
+    assertEquals(this.rawKeyValueStore, largeMessageSafeStore.getStore());
+  }
+
+  @Test
+  public void testDropLargeMessagesWithCache() {
+    Config config = new MapConfig(BASE_CONFIG, DROP_LARGE_MESSAGES);
+    StorageEngine storageEngine = callGetStorageEngine(config, null);
+    KeyValueStorageEngine<?, ?> keyValueStorageEngine = baseStorageEngineValidation(storageEngine);
+    assertStoreProperties(keyValueStorageEngine.getStoreProperties(), true, false);
+    NullSafeKeyValueStore<?, ?> nullSafeKeyValueStore =
+        assertAndCast(keyValueStorageEngine.getWrapperStore(), NullSafeKeyValueStore.class);
+    CachedStore<?, ?> cachedStore = assertAndCast(nullSafeKeyValueStore.getStore(), CachedStore.class);
+    SerializedKeyValueStore<?, ?> serializedKeyValueStore =
+        assertAndCast(cachedStore.getStore(), SerializedKeyValueStore.class);
+    LargeMessageSafeStore largeMessageSafeStore =
+        assertAndCast(serializedKeyValueStore.getStore(), LargeMessageSafeStore.class);
+    assertEquals(this.rawKeyValueStore, largeMessageSafeStore.getStore());
+  }
+
+  @Test
+  public void testAccessLogStore() {
+    Config config = new MapConfig(BASE_CONFIG, DISABLE_CACHE, ACCESS_LOG_ENABLED);
+    // AccessLoggedStore requires a changelog SSP
+    StorageEngine storageEngine = callGetStorageEngine(config, CHANGELOG_SSP);
+    KeyValueStorageEngine<?, ?> keyValueStorageEngine = baseStorageEngineValidation(storageEngine);
+    assertStoreProperties(keyValueStorageEngine.getStoreProperties(), true, true);
+    NullSafeKeyValueStore<?, ?> nullSafeKeyValueStore =
+        assertAndCast(keyValueStorageEngine.getWrapperStore(), NullSafeKeyValueStore.class);
+    AccessLoggedStore<?, ?> accessLoggedStore =
+        assertAndCast(nullSafeKeyValueStore.getStore(), AccessLoggedStore.class);
+    SerializedKeyValueStore<?, ?> serializedKeyValueStore =
+        assertAndCast(accessLoggedStore.getStore(), SerializedKeyValueStore.class);
+    LoggedStore<?, ?> loggedStore = assertAndCast(serializedKeyValueStore.getStore(), LoggedStore.class);
+    // type generics don't match due to wildcard type, but checking reference equality, so type generics don't matter
+    // noinspection AssertEqualsBetweenInconvertibleTypes
+    assertEquals(this.rawKeyValueStore, loggedStore.getStore());
+  }
+
+  private static <T extends KeyValueStore<?, ?>> T assertAndCast(KeyValueStore<?, ?> keyValueStore, Class<T> clazz) {
+    assertTrue("Expected type " + clazz.getName(), clazz.isInstance(keyValueStore));
+    return clazz.cast(keyValueStore);
+  }
+
+  private KeyValueStorageEngine<?, ?> baseStorageEngineValidation(StorageEngine storageEngine) {
+    assertTrue(storageEngine instanceof KeyValueStorageEngine);
+    KeyValueStorageEngine<?, ?> keyValueStorageEngine = (KeyValueStorageEngine<?, ?>) storageEngine;
+    assertEquals(this.rawKeyValueStore, keyValueStorageEngine.getRawStore());
+    return keyValueStorageEngine;
+  }
+
+  private static void assertStoreProperties(StoreProperties storeProperties, boolean expectedPersistedToDisk,
+      boolean expectedLoggedStore) {
+    assertEquals(expectedPersistedToDisk, storeProperties.isPersistedToDisk());
+    assertEquals(expectedLoggedStore, storeProperties.isLoggedStore());
+  }
+
+  /**
+   * @param changelogSSP if non-null, then enables logged store
+   */
+  private StorageEngine callGetStorageEngine(Config config, SystemStreamPartition changelogSSP) {
+    when(this.jobContext.getConfig()).thenReturn(config);
+    return new MockKeyValueStorageEngineFactory(this.rawKeyValueStore).getStorageEngine(STORE_NAME, this.storeDir,
+        this.keySerde, this.msgSerde, this.changelogCollector, this.metricsRegistry, changelogSSP, this.jobContext,
+        this.containerContext, STORE_MODE);
+  }
+}
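Taken together, the assertions above pin down the wrapping order that BaseKeyValueStorageEngineFactory produces for each configuration. A hedged summary for one case, with config keys written out as the assumed expansions of the StorageConfig format strings used in the test:

    // Illustrative config for a store named "myStore"; keys are assumed expansions
    // of the String.format patterns used in TestBaseKeyValueStorageEngineFactory.
    Map<String, String> config = ImmutableMap.of(
        "stores.myStore.factory", MockKeyValueStorageEngineFactory.class.getName(),
        "stores.myStore.object.cache.size", "0",            // "0" disables CachedStore
        "stores.myStore.disallow.large.messages", "true");  // inserts LargeMessageSafeStore
    // Expected chain (outermost first), per testDisallowLargeMessages:
    // NullSafeKeyValueStore -> SerializedKeyValueStore -> LargeMessageSafeStore -> raw store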
diff --git a/samza-log4j/src/test/java/org/apache/samza/logging/log4j/TestStreamAppender.java b/samza-log4j/src/test/java/org/apache/samza/logging/log4j/TestStreamAppender.java
index 3d3c39b..e5c1e97 100644
--- a/samza-log4j/src/test/java/org/apache/samza/logging/log4j/TestStreamAppender.java
+++ b/samza-log4j/src/test/java/org/apache/samza/logging/log4j/TestStreamAppender.java
@@ -190,11 +190,11 @@
     // Set up latch
     final CountDownLatch allMessagesSent = new CountDownLatch(messages.size());
     MockSystemProducer.listeners.add((source, envelope) -> {
-        allMessagesSent.countDown();
-        if (allMessagesSent.getCount() == messages.size() - 1) {
-          throw new RuntimeException(); // Throw on the first message
-        }
-      });
+      allMessagesSent.countDown();
+      if (allMessagesSent.getCount() == messages.size() - 1) {
+        throw new RuntimeException(); // Throw on the first message
+      }
+    });
 
     // Log the messages
     messages.forEach((message) -> log.info(message));
@@ -227,13 +227,13 @@
     final CountDownLatch allMessagesSent = new CountDownLatch(expectedMessagesSent); // We expect to drop all but the extra messages
     final CountDownLatch waitForTimeout = new CountDownLatch(1);
     MockSystemProducer.listeners.add((source, envelope) -> {
-        allMessagesSent.countDown();
-        try {
-          waitForTimeout.await();
-        } catch (InterruptedException e) {
-          fail("Test could not run properly because of a thread interrupt.");
-        }
-      });
+      allMessagesSent.countDown();
+      try {
+        waitForTimeout.await();
+      } catch (InterruptedException e) {
+        fail("Test could not run properly because of a thread interrupt.");
+      }
+    });
 
     // Log the messages. This is where the timeout will happen!
     messages.forEach((message) -> log.info(message));
diff --git a/samza-log4j2/src/test/java/org/apache/samza/logging/log4j2/TestStreamAppender.java b/samza-log4j2/src/test/java/org/apache/samza/logging/log4j2/TestStreamAppender.java
index 280f54e..0248343 100644
--- a/samza-log4j2/src/test/java/org/apache/samza/logging/log4j2/TestStreamAppender.java
+++ b/samza-log4j2/src/test/java/org/apache/samza/logging/log4j2/TestStreamAppender.java
@@ -204,11 +204,11 @@
     // Set up latch
     final CountDownLatch allMessagesSent = new CountDownLatch(messages.size());
     MockSystemProducer.listeners.add((source, envelope) -> {
-        allMessagesSent.countDown();
-        if (allMessagesSent.getCount() == messages.size() - 1) {
-          throw new RuntimeException(); // Throw on the first message
-        }
-      });
+      allMessagesSent.countDown();
+      if (allMessagesSent.getCount() == messages.size() - 1) {
+        throw new RuntimeException(); // Throw on the first message
+      }
+    });
 
     // Log the messages
     messages.forEach((message) -> log.info(message));
@@ -241,13 +241,13 @@
     final CountDownLatch allMessagesSent = new CountDownLatch(expectedMessagesSent); // We expect to drop all but the extra messages
     final CountDownLatch waitForTimeout = new CountDownLatch(1);
     MockSystemProducer.listeners.add((source, envelope) -> {
-        allMessagesSent.countDown();
-        try {
-          waitForTimeout.await();
-        } catch (InterruptedException e) {
-          fail("Test could not run properly because of a thread interrupt.");
-        }
-      });
+      allMessagesSent.countDown();
+      try {
+        waitForTimeout.await();
+      } catch (InterruptedException e) {
+        fail("Test could not run properly because of a thread interrupt.");
+      }
+    });
 
     // Log the messages. This is where the timeout will happen!
     messages.forEach((message) -> log.info(message));
diff --git a/samza-rest/src/main/bash/run-samza-rest-service.sh b/samza-rest/src/main/bash/run-samza-rest-service.sh
index bd52afd..a48daf3 100755
--- a/samza-rest/src/main/bash/run-samza-rest-service.sh
+++ b/samza-rest/src/main/bash/run-samza-rest-service.sh
@@ -16,7 +16,7 @@
 # specific language governing permissions and limitations
 # under the License.
 
-[[ $JAVA_OPTS != *-Dlog4j.configuration* ]] && export JAVA_OPTS="$JAVA_OPTS -Dlog4j.configuration=file:$(dirname $0)/log4j.xml"
+[[ $JAVA_OPTS != *-Dlog4j.configuration* ]] && export JAVA_OPTS="$JAVA_OPTS -Dlog4j.configuration=file:$(dirname $0)/log4j2.xml"
 [[ -z "$SAMZA_LOG_DIR" ]] && export SAMZA_LOG_DIR="$PWD/logs"
 
 exec $(dirname $0)/run-class.sh org.apache.samza.rest.SamzaRestService "$@"
diff --git a/samza-rest/src/main/java/org/apache/samza/monitor/JobsClient.java b/samza-rest/src/main/java/org/apache/samza/monitor/JobsClient.java
index 8d82600..4dc00b3 100644
--- a/samza-rest/src/main/java/org/apache/samza/monitor/JobsClient.java
+++ b/samza-rest/src/main/java/org/apache/samza/monitor/JobsClient.java
@@ -70,7 +70,7 @@
    */
   public List<Task> getTasks(JobInstance jobInstance) {
     return queryJobStatusServers(baseUrl -> String.format(ResourceConstants.GET_TASKS_URL, baseUrl,
-        jobInstance.getJobName(), jobInstance.getJobId()), new TypeReference<List<Task>>(){});
+        jobInstance.getJobName(), jobInstance.getJobId()), new TypeReference<List<Task>>() { });
   }
 
   /**
@@ -81,7 +81,7 @@
    */
   public JobStatus getJobStatus(JobInstance jobInstance) {
     Job job = queryJobStatusServers(baseUrl -> String.format(ResourceConstants.GET_JOBS_URL, baseUrl,
-        jobInstance.getJobName(), jobInstance.getJobId()), new TypeReference<Job>(){});
+        jobInstance.getJobName(), jobInstance.getJobId()), new TypeReference<Job>() { });
     return job.getStatus();
   }
 
diff --git a/samza-rest/src/main/java/org/apache/samza/monitor/LocalStoreMonitorConfig.java b/samza-rest/src/main/java/org/apache/samza/monitor/LocalStoreMonitorConfig.java
index f334c1f..6923f2f 100644
--- a/samza-rest/src/main/java/org/apache/samza/monitor/LocalStoreMonitorConfig.java
+++ b/samza-rest/src/main/java/org/apache/samza/monitor/LocalStoreMonitorConfig.java
@@ -88,7 +88,7 @@
    *         on the job status server.
    */
   public List<String> getJobStatusServers() {
-     return Arrays.asList(StringUtils.split(get(CONFIG_JOB_STATUS_SERVERS), ','));
+    return Arrays.asList(StringUtils.split(get(CONFIG_JOB_STATUS_SERVERS), ','));
   }
 
   /**
diff --git a/samza-rest/src/main/java/org/apache/samza/monitor/Monitor.java b/samza-rest/src/main/java/org/apache/samza/monitor/Monitor.java
index 0d48213..4a05538 100644
--- a/samza-rest/src/main/java/org/apache/samza/monitor/Monitor.java
+++ b/samza-rest/src/main/java/org/apache/samza/monitor/Monitor.java
@@ -28,11 +28,10 @@
  */
 public interface Monitor {
 
-    /**
-     * Do the work of the monitor. Because this can be arbitrary behavior up to and including script execution,
-     * IPC-related IOExceptions and concurrency-related InterruptedExceptions are caught by the SamzaMonitorService.
-     * @throws Exception if there was any problem running the monitor.
-     */
-    void monitor()
-        throws Exception;
+  /**
+   * Do the work of the monitor. Because this can be arbitrary behavior up to and including script execution,
+   * IPC-related IOExceptions and concurrency-related InterruptedExceptions are caught by the SamzaMonitorService.
+   * @throws Exception if there was any problem running the monitor.
+   */
+  void monitor() throws Exception;
 }
\ No newline at end of file
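Implementing the reformatted interface only requires the single monitor() method; as the javadoc notes, exceptions it throws are caught and logged by SamzaMonitorService rather than killing the scheduler. A minimal, illustrative implementation (the class name and body are assumptions, not code from this patch):

    public class LoggingMonitor implements Monitor {
      @Override
      public void monitor() throws Exception {
        // arbitrary periodic work goes here; any exception thrown is handled by the
        // try/catch in SamzaMonitorService.getRunnable() instead of propagating
      }
    }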
diff --git a/samza-rest/src/main/java/org/apache/samza/monitor/MonitorLoader.java b/samza-rest/src/main/java/org/apache/samza/monitor/MonitorLoader.java
index efbc29c..754ea9c 100644
--- a/samza-rest/src/main/java/org/apache/samza/monitor/MonitorLoader.java
+++ b/samza-rest/src/main/java/org/apache/samza/monitor/MonitorLoader.java
@@ -28,15 +28,14 @@
 public class MonitorLoader {
 
   public static Monitor instantiateMonitor(String monitorName, MonitorConfig monitorConfig,
-      MetricsRegistry metricsRegistry)
-      throws InstantiationException {
-      String factoryClass = monitorConfig.getMonitorFactoryClass();
-      try {
-        MonitorFactory monitorFactory = ReflectionUtil.getObj(factoryClass, MonitorFactory.class);
-        return monitorFactory.getMonitorInstance(monitorName, monitorConfig, metricsRegistry);
-      } catch (Exception e) {
-        throw (InstantiationException)
-            new InstantiationException("Unable to instantiate monitor with factory class " + factoryClass).initCause(e);
-      }
+      MetricsRegistry metricsRegistry) throws InstantiationException {
+    String factoryClass = monitorConfig.getMonitorFactoryClass();
+    try {
+      MonitorFactory monitorFactory = ReflectionUtil.getObj(factoryClass, MonitorFactory.class);
+      return monitorFactory.getMonitorInstance(monitorName, monitorConfig, metricsRegistry);
+    } catch (Exception e) {
+      throw (InstantiationException)
+          new InstantiationException("Unable to instantiate monitor with factory class " + factoryClass).initCause(e);
+    }
   }
 }
diff --git a/samza-rest/src/main/java/org/apache/samza/monitor/SamzaMonitorService.java b/samza-rest/src/main/java/org/apache/samza/monitor/SamzaMonitorService.java
index 05c4249..5558917 100644
--- a/samza-rest/src/main/java/org/apache/samza/monitor/SamzaMonitorService.java
+++ b/samza-rest/src/main/java/org/apache/samza/monitor/SamzaMonitorService.java
@@ -40,64 +40,64 @@
  */
 public class SamzaMonitorService {
 
-    private static final Logger LOGGER = LoggerFactory.getLogger(SamzaMonitorService.class);
-    private static final SecureRandom RANDOM = new SecureRandom();
+  private static final Logger LOGGER = LoggerFactory.getLogger(SamzaMonitorService.class);
+  private static final SecureRandom RANDOM = new SecureRandom();
 
-    private final SchedulingProvider scheduler;
-    private final SamzaRestConfig config;
-    private final MetricsRegistry metricsRegistry;
+  private final SchedulingProvider scheduler;
+  private final SamzaRestConfig config;
+  private final MetricsRegistry metricsRegistry;
 
-    public SamzaMonitorService(SamzaRestConfig config,
-                               MetricsRegistry metricsRegistry,
-                               SchedulingProvider schedulingProvider) {
-        this.config = config;
-        this.metricsRegistry = metricsRegistry;
-        this.scheduler = schedulingProvider;
-    }
+  public SamzaMonitorService(SamzaRestConfig config,
+                             MetricsRegistry metricsRegistry,
+                             SchedulingProvider schedulingProvider) {
+    this.config = config;
+    this.metricsRegistry = metricsRegistry;
+    this.scheduler = schedulingProvider;
+  }
 
-    public void start() {
-        try {
-            Map<String, MonitorConfig> monitorConfigs = getMonitorConfigs(config);
-            for (Map.Entry<String, MonitorConfig> entry : monitorConfigs.entrySet()) {
-                String monitorName = entry.getKey();
-                MonitorConfig monitorConfig = entry.getValue();
+  public void start() {
+    try {
+      Map<String, MonitorConfig> monitorConfigs = getMonitorConfigs(config);
+      for (Map.Entry<String, MonitorConfig> entry : monitorConfigs.entrySet()) {
+        String monitorName = entry.getKey();
+        MonitorConfig monitorConfig = entry.getValue();
 
-                if (!Strings.isNullOrEmpty(monitorConfig.getMonitorFactoryClass())) {
-                    int schedulingIntervalInMs = monitorConfig.getSchedulingIntervalInMs();
-                    int monitorSchedulingJitterInMs = (int) (RANDOM.nextInt(schedulingIntervalInMs + 1) * (monitorConfig.getSchedulingJitterPercent() / 100.0));
-                    schedulingIntervalInMs += monitorSchedulingJitterInMs;
-                    LOGGER.info("Scheduling the monitor: {} to run every {} ms.", monitorName, schedulingIntervalInMs);
-                    scheduler.schedule(getRunnable(instantiateMonitor(monitorName, monitorConfig, metricsRegistry)),
-                        schedulingIntervalInMs);
-                } else {
-                  // When MonitorFactoryClass is not defined in the config, ignore the monitor config
-                  LOGGER.warn("Not scheduling the monitor: {} to run, since monitor factory class is not set in config.", monitorName);
-                }
-            }
-        } catch (InstantiationException e) {
-            LOGGER.error("Exception when instantiating the monitor : ", e);
-            throw new SamzaException(e);
+        if (!Strings.isNullOrEmpty(monitorConfig.getMonitorFactoryClass())) {
+          int schedulingIntervalInMs = monitorConfig.getSchedulingIntervalInMs();
+          int monitorSchedulingJitterInMs = (int) (RANDOM.nextInt(schedulingIntervalInMs + 1) * (monitorConfig.getSchedulingJitterPercent() / 100.0));
+          schedulingIntervalInMs += monitorSchedulingJitterInMs;
+          LOGGER.info("Scheduling the monitor: {} to run every {} ms.", monitorName, schedulingIntervalInMs);
+          scheduler.schedule(getRunnable(instantiateMonitor(monitorName, monitorConfig, metricsRegistry)),
+              schedulingIntervalInMs);
+        } else {
+          // When MonitorFactoryClass is not defined in the config, ignore the monitor config
+          LOGGER.warn("Not scheduling the monitor: {} to run, since monitor factory class is not set in config.", monitorName);
         }
+      }
+    } catch (InstantiationException e) {
+      LOGGER.error("Exception when instantiating the monitor : ", e);
+      throw new SamzaException(e);
     }
+  }
 
-    public void stop() {
-        this.scheduler.stop();
-    }
+  public void stop() {
+    this.scheduler.stop();
+  }
 
-    private Runnable getRunnable(final Monitor monitor) {
-        return new Runnable() {
-            public void run() {
-                try {
-                    monitor.monitor();
-                } catch (IOException e) {
-                    LOGGER.error("Caught IOException during " + monitor.toString() + ".monitor()", e);
-                } catch (InterruptedException e) {
-                    Thread.currentThread().interrupt();
-                    LOGGER.error("Caught InterruptedException during " + monitor.toString() + ".monitor()", e);
-                } catch (Exception e) {
-                    LOGGER.error("Unexpected exception during {}.monitor()", monitor, e);
-                }
-            }
-        };
-    }
+  private Runnable getRunnable(final Monitor monitor) {
+    return new Runnable() {
+      public void run() {
+        try {
+          monitor.monitor();
+        } catch (IOException e) {
+          LOGGER.error("Caught IOException during " + monitor.toString() + ".monitor()", e);
+        } catch (InterruptedException e) {
+          Thread.currentThread().interrupt();
+          LOGGER.error("Caught InterruptedException during " + monitor.toString() + ".monitor()", e);
+        } catch (Exception e) {
+          LOGGER.error("Unexpected exception during {}.monitor()", monitor, e);
+        }
+      }
+    };
+  }
 }
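The jitter computation in start() widens each monitor's period by a random fraction of the configured interval. A worked example under assumed config values (the numbers are illustrative; the arithmetic mirrors the code above):

    int base = 60_000;            // monitor scheduling interval in ms (assumed value)
    double jitterPercent = 10.0;  // scheduling jitter percent (assumed value)
    int jitter = (int) (new java.security.SecureRandom().nextInt(base + 1) * (jitterPercent / 100.0));
    int effectiveInterval = base + jitter;  // anywhere from 60_000 to 66_000 ms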
diff --git a/samza-rest/src/main/java/org/apache/samza/monitor/ScheduledExecutorSchedulingProvider.java b/samza-rest/src/main/java/org/apache/samza/monitor/ScheduledExecutorSchedulingProvider.java
index c0c448c..e1f0711 100644
--- a/samza-rest/src/main/java/org/apache/samza/monitor/ScheduledExecutorSchedulingProvider.java
+++ b/samza-rest/src/main/java/org/apache/samza/monitor/ScheduledExecutorSchedulingProvider.java
@@ -24,17 +24,17 @@
 
 public class ScheduledExecutorSchedulingProvider implements SchedulingProvider {
 
-    private final ScheduledExecutorService scheduler;
+  private final ScheduledExecutorService scheduler;
 
-    public ScheduledExecutorSchedulingProvider(ScheduledExecutorService scheduler) {
-        this.scheduler = scheduler;
-    }
+  public ScheduledExecutorSchedulingProvider(ScheduledExecutorService scheduler) {
+    this.scheduler = scheduler;
+  }
 
-    public void schedule(Runnable runnable, int interval) {
-        this.scheduler.scheduleAtFixedRate(runnable, 0, interval, MILLISECONDS);
-    }
+  public void schedule(Runnable runnable, int interval) {
+    this.scheduler.scheduleAtFixedRate(runnable, 0, interval, MILLISECONDS);
+  }
 
-    public void stop() {
-        this.scheduler.shutdownNow();
-    }
+  public void stop() {
+    this.scheduler.shutdownNow();
+  }
 }
diff --git a/samza-rest/src/main/java/org/apache/samza/monitor/SchedulingProvider.java b/samza-rest/src/main/java/org/apache/samza/monitor/SchedulingProvider.java
index 627a333..d68f6c1 100644
--- a/samza-rest/src/main/java/org/apache/samza/monitor/SchedulingProvider.java
+++ b/samza-rest/src/main/java/org/apache/samza/monitor/SchedulingProvider.java
@@ -22,9 +22,9 @@
  * Provides scheduling functionality to the SamzaMonitorService.
  */
 public interface SchedulingProvider {
-    /* Schedule a the given Runnable to run() every INTERVAL ms. */
-    void schedule(Runnable runnable, int intervalMs);
+  /* Schedule the given Runnable to run() every intervalMs milliseconds. */
+  void schedule(Runnable runnable, int intervalMs);
 
-    /* Stop any future executions of any scheduled tasks. */
-    void stop();
+  /* Stop any future executions of any scheduled tasks. */
+  void stop();
 }
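Any SchedulingProvider implementation just has to honor these two calls. A deliberately trivial sketch that runs the task once, inline (illustrative only; it is not the InstantSchedulingProvider used by the tests further below):

    public class RunOnceSchedulingProvider implements SchedulingProvider {
      @Override
      public void schedule(Runnable runnable, int intervalMs) {
        runnable.run();  // ignore the interval and execute immediately on the caller's thread
      }
      @Override
      public void stop() {
        // nothing scheduled, nothing to stop
      }
    }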
diff --git a/samza-rest/src/main/java/org/apache/samza/rest/SamzaRestService.java b/samza-rest/src/main/java/org/apache/samza/rest/SamzaRestService.java
index a2db84b..c5d6af7 100644
--- a/samza-rest/src/main/java/org/apache/samza/rest/SamzaRestService.java
+++ b/samza-rest/src/main/java/org/apache/samza/rest/SamzaRestService.java
@@ -82,7 +82,8 @@
    * Command line interface to run the server.
    *
    * @param args arguments supported by {@link org.apache.samza.util.CommandLine}.
-   *             In particular, --config-path and --config-factory are used to read the Samza REST config file.
+   *             In particular, --config job.config.loader.properties.path and
+   *             --config job.config.loader.factory are used to read the Samza REST config file.
    * @throws Exception if the server could not be successfully started.
    */
   public static void main(String[] args)
@@ -119,7 +120,7 @@
     } catch (Throwable t) {
       log.error("Exception in main.", t);
     } finally {
-      if (schedulingProvider != null){
+      if (schedulingProvider != null) {
         schedulingProvider.stop();
       }
     }
diff --git a/samza-rest/src/main/java/org/apache/samza/rest/model/yarn/YarnApplicationInfo.java b/samza-rest/src/main/java/org/apache/samza/rest/model/yarn/YarnApplicationInfo.java
index 8d55c89..a696999 100644
--- a/samza-rest/src/main/java/org/apache/samza/rest/model/yarn/YarnApplicationInfo.java
+++ b/samza-rest/src/main/java/org/apache/samza/rest/model/yarn/YarnApplicationInfo.java
@@ -19,8 +19,6 @@
 package org.apache.samza.rest.model.yarn;
 
 import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
 import org.apache.samza.rest.proxy.job.JobInstance;
 import org.codehaus.jackson.annotate.JsonIgnoreProperties;
 import org.codehaus.jackson.annotate.JsonProperty;
@@ -56,8 +54,8 @@
    * @return the job name to use for the job in YARN.
    */
   public static String getQualifiedJobName(JobInstance jobInstance) {
-    final String JOB_NAME_ID_FORMAT = "%s_%s";
-    return String.format(JOB_NAME_ID_FORMAT, jobInstance.getJobName(), jobInstance.getJobId());
+    final String jobNameIdFormat = "%s_%s";
+    return String.format(jobNameIdFormat, jobInstance.getJobName(), jobInstance.getJobId());
   }
 
   @JsonIgnoreProperties(ignoreUnknown = true)
diff --git a/samza-rest/src/main/java/org/apache/samza/rest/proxy/job/AbstractJobProxy.java b/samza-rest/src/main/java/org/apache/samza/rest/proxy/job/AbstractJobProxy.java
index 8b35352..f409374 100644
--- a/samza-rest/src/main/java/org/apache/samza/rest/proxy/job/AbstractJobProxy.java
+++ b/samza-rest/src/main/java/org/apache/samza/rest/proxy/job/AbstractJobProxy.java
@@ -74,8 +74,8 @@
       throws IOException, InterruptedException {
     List<Job> allJobs = new ArrayList<>();
     Collection<JobInstance> jobInstances = getAllJobInstances();
-    for(JobInstance jobInstance : jobInstances) {
-        allJobs.add(new Job(jobInstance.getJobName(), jobInstance.getJobId()));
+    for (JobInstance jobInstance : jobInstances) {
+      allJobs.add(new Job(jobInstance.getJobName(), jobInstance.getJobId()));
     }
     getJobStatusProvider().getJobStatuses(allJobs);
 
diff --git a/samza-rest/src/main/java/org/apache/samza/rest/proxy/job/JobProxy.java b/samza-rest/src/main/java/org/apache/samza/rest/proxy/job/JobProxy.java
index 7e168d7..d42ff7a 100644
--- a/samza-rest/src/main/java/org/apache/samza/rest/proxy/job/JobProxy.java
+++ b/samza-rest/src/main/java/org/apache/samza/rest/proxy/job/JobProxy.java
@@ -21,7 +21,6 @@
 import java.io.IOException;
 import java.util.List;
 import org.apache.samza.rest.model.Job;
-import org.apache.samza.rest.model.JobStatus;
 
 
 /**
@@ -57,8 +56,8 @@
 
   /**
    * Starts the job instance specified by jobName and jobId. When this method returns, the status of the job
-   * should be {@link JobStatus#STARTING} or
-   * {@link JobStatus#STARTED} depending on the implementation.
+   * should be {@link org.apache.samza.rest.model.JobStatus#STARTING} or
+   * {@link org.apache.samza.rest.model.JobStatus#STARTED} depending on the implementation.
    *
    * @param jobInstance the instance of the job to start.
    * @throws Exception  if the job could not be successfully started.
@@ -68,7 +67,7 @@
 
   /**
    * Stops the job instance specified by jobName and jobId. When this method returns, the status of the job
-   * should be {@link JobStatus#STOPPED}.
+   * should be {@link org.apache.samza.rest.model.JobStatus#STOPPED}.
    *
    * @param jobInstance the instance of the job to stop.
    * @throws Exception  if the job could not be successfully stopped.
diff --git a/samza-rest/src/main/java/org/apache/samza/rest/proxy/job/SimpleYarnJobProxy.java b/samza-rest/src/main/java/org/apache/samza/rest/proxy/job/SimpleYarnJobProxy.java
index 310c3e6..e982d0f 100644
--- a/samza-rest/src/main/java/org/apache/samza/rest/proxy/job/SimpleYarnJobProxy.java
+++ b/samza-rest/src/main/java/org/apache/samza/rest/proxy/job/SimpleYarnJobProxy.java
@@ -37,11 +37,10 @@
 public class SimpleYarnJobProxy extends ScriptJobProxy {
   private static final Logger log = LoggerFactory.getLogger(SimpleYarnJobProxy.class);
 
-  private static final String START_SCRIPT_NAME = "run-job.sh";
+  private static final String START_SCRIPT_NAME = "run-app.sh";
   private static final String STOP_SCRIPT_NAME = "kill-yarn-job-by-name.sh";
 
-  private static final String CONFIG_FACTORY_PARAM = "--config-factory=org.apache.samza.config.factories.PropertiesConfigFactory";
-  private static final String CONFIG_PATH_PARAM_FORMAT = "--config-path=file://%s";
+  private static final String CONFIG_PATH_PARAM_FORMAT = "--config-path=%s";
 
   private final JobStatusProvider statusProvider;
 
@@ -64,7 +63,7 @@
     }
 
     String scriptPath = getScriptPath(jobInstance, START_SCRIPT_NAME);
-    int resultCode = scriptRunner.runScript(scriptPath, CONFIG_FACTORY_PARAM,
+    int resultCode = scriptRunner.runScript(scriptPath,
         generateConfigPathParameter(jobInstance));
     if (resultCode != 0) {
       throw new SamzaException("Failed to start job. Result code: " + resultCode);
diff --git a/samza-rest/src/main/java/org/apache/samza/rest/proxy/job/YarnRestJobStatusProvider.java b/samza-rest/src/main/java/org/apache/samza/rest/proxy/job/YarnRestJobStatusProvider.java
index 1009f10..e0b79ef 100644
--- a/samza-rest/src/main/java/org/apache/samza/rest/proxy/job/YarnRestJobStatusProvider.java
+++ b/samza-rest/src/main/java/org/apache/samza/rest/proxy/job/YarnRestJobStatusProvider.java
@@ -70,7 +70,7 @@
     // We will identify the YARN application states by their qualified names, so build a map
     // to translate back from that name to the JobInfo we wish to populate.
     final Map<String, Job> qualifiedJobToInfo = new HashMap<>();
-    for(Job job : jobs) {
+    for (Job job : jobs) {
       qualifiedJobToInfo.put(YarnApplicationInfo.getQualifiedJobName(new JobInstance(job.getJobName(), job.getJobId())), job);
     }
 
diff --git a/samza-rest/src/main/java/org/apache/samza/rest/proxy/task/SamzaTaskProxy.java b/samza-rest/src/main/java/org/apache/samza/rest/proxy/task/SamzaTaskProxy.java
index 31931db..b86da1f 100644
--- a/samza-rest/src/main/java/org/apache/samza/rest/proxy/task/SamzaTaskProxy.java
+++ b/samza-rest/src/main/java/org/apache/samza/rest/proxy/task/SamzaTaskProxy.java
@@ -143,10 +143,10 @@
     StorageConfig storageConfig = new StorageConfig(consumer.getConfig());
     List<String> storeNames = storageConfig.getStoreNames();
     return taskNameToContainerIdMapping.entrySet()
-                                       .stream()
-                                       .map(entry -> {
-        String hostName = containerIdToHostMapping.get(entry.getValue()).get(SetContainerHostMapping.HOST_KEY);
-        return new Task(hostName, entry.getKey(), entry.getValue(), new ArrayList<>(), storeNames);
-                                       }).collect(Collectors.toList());
+        .stream()
+        .map(entry -> {
+          String hostName = containerIdToHostMapping.get(entry.getValue()).get(SetContainerHostMapping.HOST_KEY);
+          return new Task(hostName, entry.getKey(), entry.getValue(), new ArrayList<>(), storeNames);
+        }).collect(Collectors.toList());
   }
 }
diff --git a/samza-rest/src/main/java/org/apache/samza/rest/proxy/task/TaskProxy.java b/samza-rest/src/main/java/org/apache/samza/rest/proxy/task/TaskProxy.java
index 54da8c9..46bf385 100644
--- a/samza-rest/src/main/java/org/apache/samza/rest/proxy/task/TaskProxy.java
+++ b/samza-rest/src/main/java/org/apache/samza/rest/proxy/task/TaskProxy.java
@@ -22,8 +22,6 @@
 import java.util.List;
 import org.apache.samza.rest.model.Task;
 import org.apache.samza.rest.proxy.job.JobInstance;
-import org.apache.samza.rest.proxy.job.JobProxy;
-import org.apache.samza.rest.resources.JobsResourceConfig;
 
 
 /**
diff --git a/samza-rest/src/main/java/org/apache/samza/rest/proxy/task/TaskProxyFactory.java b/samza-rest/src/main/java/org/apache/samza/rest/proxy/task/TaskProxyFactory.java
index 5e9957f..639e983 100644
--- a/samza-rest/src/main/java/org/apache/samza/rest/proxy/task/TaskProxyFactory.java
+++ b/samza-rest/src/main/java/org/apache/samza/rest/proxy/task/TaskProxyFactory.java
@@ -18,8 +18,6 @@
  */
 package org.apache.samza.rest.proxy.task;
 
-import org.apache.samza.config.Config;
-
 
 /**
  * Factory interface that will be used to create {@link TaskProxy}
@@ -33,8 +31,7 @@
 public interface TaskProxyFactory {
 
   /**
-   *
-   * @param config the {@link Config} to pass to the proxy.
+   * @param config the {@link TaskResourceConfig} to pass to the proxy.
    * @return the created proxy.
    */
   TaskProxy getTaskProxy(TaskResourceConfig config);
diff --git a/samza-rest/src/main/java/org/apache/samza/rest/proxy/task/TaskResourceConfig.java b/samza-rest/src/main/java/org/apache/samza/rest/proxy/task/TaskResourceConfig.java
index 40cf706..401d4ac 100644
--- a/samza-rest/src/main/java/org/apache/samza/rest/proxy/task/TaskResourceConfig.java
+++ b/samza-rest/src/main/java/org/apache/samza/rest/proxy/task/TaskResourceConfig.java
@@ -20,11 +20,10 @@
 
 import org.apache.samza.config.Config;
 import org.apache.samza.rest.resources.BaseResourceConfig;
-import org.apache.samza.rest.resources.TasksResource;
 
 
 /**
- * Configurations for the {@link TasksResource} endpoint.
+ * Configurations for the {@link org.apache.samza.rest.resources.TasksResource} endpoint.
  */
 public class TaskResourceConfig extends BaseResourceConfig {
 
diff --git a/samza-rest/src/main/java/org/apache/samza/rest/resources/BaseResourceConfig.java b/samza-rest/src/main/java/org/apache/samza/rest/resources/BaseResourceConfig.java
index eca8fdc..1c4d95a 100644
--- a/samza-rest/src/main/java/org/apache/samza/rest/resources/BaseResourceConfig.java
+++ b/samza-rest/src/main/java/org/apache/samza/rest/resources/BaseResourceConfig.java
@@ -21,7 +21,6 @@
 import org.apache.samza.config.Config;
 import org.apache.samza.config.MapConfig;
 import org.apache.samza.config.factories.PropertiesConfigFactory;
-import org.apache.samza.rest.proxy.installation.InstallationRecord;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -41,7 +40,8 @@
 
   /**
    * The path where all the Samza jobs are installed (unzipped). Each subdirectory of this path
-   * is expected to be a Samza job installation and corresponds to one {@link InstallationRecord}.
+   * is expected to be a Samza job installation and corresponds to one
+   * {@link org.apache.samza.rest.proxy.installation.InstallationRecord}.
    */
   public static final String CONFIG_JOB_INSTALLATIONS_PATH = "job.installations.path";
 
diff --git a/samza-rest/src/main/java/org/apache/samza/rest/resources/JobsResource.java b/samza-rest/src/main/java/org/apache/samza/rest/resources/JobsResource.java
index caad56c..d79e2fa 100644
--- a/samza-rest/src/main/java/org/apache/samza/rest/resources/JobsResource.java
+++ b/samza-rest/src/main/java/org/apache/samza/rest/resources/JobsResource.java
@@ -33,7 +33,6 @@
 import org.apache.samza.rest.proxy.job.AbstractJobProxy;
 import org.apache.samza.rest.proxy.job.JobInstance;
 import org.apache.samza.rest.proxy.job.JobProxy;
-import org.apache.samza.rest.proxy.job.JobProxyFactory;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -51,9 +50,9 @@
 
   /**
    * Initializes a JobResource with {@link JobProxy} from the
-   * {@link JobProxyFactory} class specified in the configuration.
+   * {@link org.apache.samza.rest.proxy.job.JobProxyFactory} class specified in the configuration.
    *
-   * @param config  the configuration containing the {@link JobProxyFactory} class.
+   * @param config  the configuration containing the {@link org.apache.samza.rest.proxy.job.JobProxyFactory} class.
    */
   public JobsResource(JobsResourceConfig config) {
     jobProxy = AbstractJobProxy.fromFactory(config);
diff --git a/samza-rest/src/main/java/org/apache/samza/rest/resources/JobsResourceConfig.java b/samza-rest/src/main/java/org/apache/samza/rest/resources/JobsResourceConfig.java
index bd52e65..7748fee 100644
--- a/samza-rest/src/main/java/org/apache/samza/rest/resources/JobsResourceConfig.java
+++ b/samza-rest/src/main/java/org/apache/samza/rest/resources/JobsResourceConfig.java
@@ -19,8 +19,6 @@
 package org.apache.samza.rest.resources;
 
 import org.apache.samza.config.Config;
-import org.apache.samza.rest.proxy.job.JobProxy;
-import org.apache.samza.rest.proxy.job.JobProxyFactory;
 
 
 /**
@@ -29,8 +27,8 @@
 public class JobsResourceConfig extends BaseResourceConfig {
 
   /**
-   * Specifies the canonical name of the {@link JobProxyFactory} class to produce
-   * {@link JobProxy} instances.
+   * Specifies the canonical name of the {@link org.apache.samza.rest.proxy.job.JobProxyFactory} class to produce
+   * {@link org.apache.samza.rest.proxy.job.JobProxy} instances.
    *
    * To use your own proxy, implement the factory and specify the class for this config.
    */
@@ -42,7 +40,8 @@
 
   /**
    * @see JobsResourceConfig#CONFIG_JOB_PROXY_FACTORY
-   * @return the canonical name of the {@link JobProxyFactory} class to produce {@link JobProxy} instances.
+   * @return the canonical name of the {@link org.apache.samza.rest.proxy.job.JobProxyFactory} class to produce
+   * {@link org.apache.samza.rest.proxy.job.JobProxy} instances.
    */
   public String getJobProxyFactory() {
     return get(CONFIG_JOB_PROXY_FACTORY);
diff --git a/samza-rest/src/main/java/org/apache/samza/rest/resources/TasksResource.java b/samza-rest/src/main/java/org/apache/samza/rest/resources/TasksResource.java
index ff6cc26..12db787 100644
--- a/samza-rest/src/main/java/org/apache/samza/rest/resources/TasksResource.java
+++ b/samza-rest/src/main/java/org/apache/samza/rest/resources/TasksResource.java
@@ -28,7 +28,6 @@
 import javax.ws.rs.core.Response;
 import org.apache.commons.lang.StringUtils;
 import org.apache.samza.SamzaException;
-import org.apache.samza.rest.model.Task;
 import org.apache.samza.rest.proxy.job.JobInstance;
 import org.apache.samza.rest.proxy.task.TaskProxyFactory;
 import org.apache.samza.rest.proxy.task.TaskProxy;
@@ -68,11 +67,11 @@
   }
 
   /**
-   * Gets the list of {@link Task} for the job instance specified by jobName and jobId.
+   * Gets the list of {@link org.apache.samza.rest.model.Task} for the job instance specified by jobName and jobId.
    * @param jobName the name of the job as configured in {@link org.apache.samza.config.JobConfig#JOB_NAME}
    * @param jobId the id of the job as configured in {@link org.apache.samza.config.JobConfig#JOB_ID}.
    * @return a {@link javax.ws.rs.core.Response.Status#OK} {@link javax.ws.rs.core.Response}
-   *         contains a list of {@link Task}, where each task belongs to
+   *         contains a list of {@link org.apache.samza.rest.model.Task}, where each task belongs to
    *         the samza job. {@link javax.ws.rs.core.Response.Status#BAD_REQUEST} is returned for invalid
    *         job instances.
    */
diff --git a/samza-rest/src/test/java/org/apache/samza/monitor/TestLocalStoreMonitor.java b/samza-rest/src/test/java/org/apache/samza/monitor/TestLocalStoreMonitor.java
index 15f0df9..972598f 100644
--- a/samza-rest/src/test/java/org/apache/samza/monitor/TestLocalStoreMonitor.java
+++ b/samza-rest/src/test/java/org/apache/samza/monitor/TestLocalStoreMonitor.java
@@ -46,7 +46,7 @@
 
 public class TestLocalStoreMonitor {
 
-  private static Logger LOG = LoggerFactory.getLogger(TestLocalStoreMonitor.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestLocalStoreMonitor.class);
 
   private File localStoreDir;
 
diff --git a/samza-rest/src/test/java/org/apache/samza/monitor/TestMonitorService.java b/samza-rest/src/test/java/org/apache/samza/monitor/TestMonitorService.java
index c7a991b..b2924fc 100644
--- a/samza-rest/src/test/java/org/apache/samza/monitor/TestMonitorService.java
+++ b/samza-rest/src/test/java/org/apache/samza/monitor/TestMonitorService.java
@@ -46,137 +46,136 @@
 
 public class TestMonitorService {
 
-    private static final MetricsRegistry METRICS_REGISTRY = new NoOpMetricsRegistry();
+  private static final MetricsRegistry METRICS_REGISTRY = new NoOpMetricsRegistry();
 
-    @Test
-    public void testMonitorsShouldBeInstantiatedProperly() {
-        // Test that a monitor should be instantiated properly by invoking
-        // the appropriate factory method.
-        Map<String, String> configMap = ImmutableMap.of(CONFIG_MONITOR_FACTORY_CLASS,
-                                                        DummyMonitorFactory.class.getCanonicalName());
-        Monitor monitor = null;
+  @Test
+  public void testMonitorsShouldBeInstantiatedProperly() {
+    // Test that a monitor should be instantiated properly by invoking
+    // the appropriate factory method.
+    Map<String, String> configMap = ImmutableMap.of(CONFIG_MONITOR_FACTORY_CLASS,
+                                                    DummyMonitorFactory.class.getCanonicalName());
+    Monitor monitor = null;
+    try {
+      monitor = MonitorLoader.instantiateMonitor("testMonitor", new MonitorConfig(new MapConfig(configMap)),
+          METRICS_REGISTRY);
+    } catch (InstantiationException e) {
+      fail();
+    }
+    assertNotNull(monitor);
+    // Object should implement the monitor().
+    try {
+      monitor.monitor();
+    } catch (Exception e) {
+      fail();
+    }
+  }
+
+  @Test
+  public void testShouldGroupRelevantMonitorConfigTogether() {
+    // Test that Monitor Loader groups relevant config together.
+    Map<String, String> firstMonitorConfig = ImmutableMap.of("monitor.monitor1.factory.class",
+                                                             "org.apache.samza.monitor.DummyMonitor",
+                                                             "monitor.monitor1.scheduling.interval.ms",
+                                                             "100");
+    Map<String, String> secondMonitorConfig = ImmutableMap.of("monitor.monitor2.factory.class",
+                                                              "org.apache.samza.monitor.DummyMonitor",
+                                                              "monitor.monitor2.scheduling.interval.ms",
+                                                              "200");
+    MapConfig mapConfig = new MapConfig(ImmutableList.of(firstMonitorConfig, secondMonitorConfig));
+    MonitorConfig expectedFirstConfig = new MonitorConfig(new MapConfig(firstMonitorConfig).subset("monitor.monitor1."));
+    MonitorConfig expectedSecondConfig = new MonitorConfig(new MapConfig(secondMonitorConfig).subset("monitor.monitor2."));
+    Map<String, MonitorConfig> expected = ImmutableMap.of("monitor1", expectedFirstConfig, "monitor2", expectedSecondConfig);
+    assertEquals(expected, MonitorConfig.getMonitorConfigs(mapConfig));
+  }
+
+  @Test
+  public void testMonitorExceptionIsolation() {
+    // Test that an exception from a monitor doesn't bubble up out of the scheduler.
+    Map<String, String> configMap =
+        ImmutableMap.of(String.format("monitor.name.%s", CONFIG_MONITOR_FACTORY_CLASS),
+                        ExceptionThrowingMonitorFactory.class.getCanonicalName());
+    SamzaRestConfig config = new SamzaRestConfig(new MapConfig(configMap));
+    SamzaMonitorService monitorService = new SamzaMonitorService(config,
+                                                                 METRICS_REGISTRY,
+                                                                 new InstantSchedulingProvider());
+
+    // This will throw if the exception isn't caught within the provider.
+    monitorService.start();
+    monitorService.stop();
+  }
+
+  @Test
+  public void testShouldNotFailWhenTheMonitorFactoryClassIsNotDefined()
+      throws Exception {
+    // Test that when MonitorFactoryClass is not defined in the config, monitor service
+    // should not fail.
+    Map<String, String> configMap = ImmutableMap.of("monitor.monitor1.config.key1", "configValue1",
+                                                    "monitor.monitor1.config.key2", "configValue2",
+                                                    String.format("monitor.MOCK_MONITOR.%s", CONFIG_MONITOR_FACTORY_CLASS),
+                                                    MockMonitorFactory.class.getCanonicalName());
+
+    SamzaRestConfig config = new SamzaRestConfig(new MapConfig(configMap));
+    SamzaMonitorService monitorService = new SamzaMonitorService(config,
+                                                                 METRICS_REGISTRY,
+                                                                 new InstantSchedulingProvider());
+    try {
+      monitorService.start();
+    } catch (Exception e) {
+      fail();
+    }
+    Mockito.verify(MockMonitorFactory.MOCK_MONITOR, Mockito.times(1)).monitor();
+  }
+
+  @Test(expected = SamzaException.class)
+  public void testShouldFailWhenTheMonitorFactoryClassIsInvalid() {
+    // Test that when MonitorFactoryClass is defined in the config but is invalid,
+    // the monitor service should fail by throwing a SamzaException.
+    Map<String, String> configMap = ImmutableMap.of(String.format("monitor.name.%s", CONFIG_MONITOR_FACTORY_CLASS),
+                                                    "RandomClassName");
+    SamzaRestConfig config = new SamzaRestConfig(new MapConfig(configMap));
+    SamzaMonitorService monitorService = new SamzaMonitorService(config,
+                                                                 METRICS_REGISTRY,
+                                                                 new InstantSchedulingProvider());
+    monitorService.start();
+  }
+
+  @Test
+  public void testScheduledExecutorSchedulingProvider() {
+    // Test that the monitor is scheduled by the ScheduledExecutorSchedulingProvider
+    ScheduledExecutorService executorService = Executors.newScheduledThreadPool(1);
+    ScheduledExecutorSchedulingProvider provider = new ScheduledExecutorSchedulingProvider(executorService);
+
+    // notifyingMonitor.monitor() should be called repeatedly.
+    final CountDownLatch wasCalledLatch = new CountDownLatch(3);
+
+    final Monitor notifyingMonitor = new Monitor() {
+      @Override
+      public void monitor() {
+        wasCalledLatch.countDown();
+      }
+    };
+
+    Runnable runnableMonitor = new Runnable() {
+      public void run() {
         try {
-            monitor = MonitorLoader.instantiateMonitor("testMonitor", new MonitorConfig(new MapConfig(configMap)),
-                METRICS_REGISTRY);
-        } catch (InstantiationException e) {
-            fail();
-        }
-        assertNotNull(monitor);
-        // Object should implement the monitor().
-        try {
-            monitor.monitor();
+          notifyingMonitor.monitor();
         } catch (Exception e) {
-            fail();
+          // Must be caught here because monitor() declares checked exceptions
+          fail();
         }
+      }
+    };
+
+    // monitor should get called every 1ms, so if await() misses the first call, there will be more.
+    provider.schedule(runnableMonitor, 1);
+
+    try {
+      assertTrue(wasCalledLatch.await(5L, TimeUnit.SECONDS));
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+    } finally {
+      executorService.shutdownNow();
     }
 
-    @Test
-    public void testShouldGroupRelevantMonitorConfigTogether() {
-        // Test that Monitor Loader groups relevant config together.
-        Map<String, String> firstMonitorConfig = ImmutableMap.of("monitor.monitor1.factory.class",
-                                                                 "org.apache.samza.monitor.DummyMonitor",
-                                                                 "monitor.monitor1.scheduling.interval.ms",
-                                                                 "100");
-        Map<String, String> secondMonitorConfig = ImmutableMap.of("monitor.monitor2.factory.class",
-                                                                  "org.apache.samza.monitor.DummyMonitor",
-                                                                  "monitor.monitor2.scheduling.interval.ms",
-                                                                  "200");
-        MapConfig mapConfig = new MapConfig(ImmutableList.of(firstMonitorConfig, secondMonitorConfig));
-        MonitorConfig expectedFirstConfig = new MonitorConfig(new MapConfig(firstMonitorConfig).subset("monitor.monitor1."));
-        MonitorConfig expectedSecondConfig = new MonitorConfig(new MapConfig(secondMonitorConfig).subset("monitor.monitor2."));
-        Map<String, MonitorConfig> expected = ImmutableMap.of("monitor1", expectedFirstConfig, "monitor2", expectedSecondConfig);
-        assertEquals(expected, MonitorConfig.getMonitorConfigs(mapConfig));
-    }
-
-    @Test
-    public void testMonitorExceptionIsolation() {
-        // Test that an exception from a monitor doesn't bubble up out of the scheduler.
-        Map<String, String> configMap =
-            ImmutableMap.of(String.format("monitor.name.%s", CONFIG_MONITOR_FACTORY_CLASS),
-                            ExceptionThrowingMonitorFactory.class.getCanonicalName());
-        SamzaRestConfig config = new SamzaRestConfig(new MapConfig(configMap));
-        SamzaMonitorService monitorService = new SamzaMonitorService(config,
-                                                                     METRICS_REGISTRY,
-                                                                     new InstantSchedulingProvider());
-
-        // This will throw if the exception isn't caught within the provider.
-        monitorService.start();
-        monitorService.stop();
-    }
-
-    @Test
-    public void testShouldNotFailWhenTheMonitorFactoryClassIsNotDefined()
-        throws Exception {
-        // Test that when MonitorFactoryClass is not defined in the config, monitor service
-        // should not fail.
-        Map<String, String> configMap = ImmutableMap.of("monitor.monitor1.config.key1", "configValue1",
-                                                        "monitor.monitor1.config.key2", "configValue2",
-                                                        String.format("monitor.MOCK_MONITOR.%s", CONFIG_MONITOR_FACTORY_CLASS),
-                                                        MockMonitorFactory.class.getCanonicalName());
-
-        SamzaRestConfig config = new SamzaRestConfig(new MapConfig(configMap));
-        SamzaMonitorService monitorService = new SamzaMonitorService(config,
-                                                                     METRICS_REGISTRY,
-                                                                     new InstantSchedulingProvider());
-        try {
-            monitorService.start();
-        } catch (Exception e) {
-            fail();
-        }
-        Mockito.verify(MockMonitorFactory.MOCK_MONITOR, Mockito.times(1)).monitor();
-    }
-
-    @Test(expected = SamzaException.class)
-    public void testShouldFailWhenTheMonitorFactoryClassIsInvalid() {
-        // Test that when MonitorFactoryClass is defined in the config and is invalid,
-        // monitor service should fail. Should throw back SamzaException.
-        Map<String, String> configMap = ImmutableMap.of(String.format("monitor.name.%s", CONFIG_MONITOR_FACTORY_CLASS),
-                                                        "RandomClassName");
-        SamzaRestConfig config = new SamzaRestConfig(new MapConfig(configMap));
-        SamzaMonitorService monitorService = new SamzaMonitorService(config,
-                                                                     METRICS_REGISTRY,
-                                                                     new InstantSchedulingProvider());
-        monitorService.start();
-    }
-
-    @Test
-    public void testScheduledExecutorSchedulingProvider() {
-        // Test that the monitor is scheduled by the ScheduledExecutorSchedulingProvider
-        ScheduledExecutorService executorService = Executors.newScheduledThreadPool(1);
-        ScheduledExecutorSchedulingProvider provider =
-                new ScheduledExecutorSchedulingProvider(executorService);
-
-        // notifyingMonitor.monitor() should be called repeatedly.
-        final CountDownLatch wasCalledLatch = new CountDownLatch(3);
-
-        final Monitor notifyingMonitor = new Monitor() {
-            @Override
-            public void monitor() {
-                wasCalledLatch.countDown();
-            }
-        };
-
-        Runnable runnableMonitor = new Runnable() {
-            public void run() {
-                try {
-                    notifyingMonitor.monitor();
-                } catch (Exception e) {
-                    // Must be caught because they are checked in monitor()
-                    fail();
-                }
-            }
-        };
-
-        // monitor should get called every 1ms, so if await() misses the first call, there will be more.
-        provider.schedule(runnableMonitor, 1);
-
-        try {
-            assertTrue(wasCalledLatch.await(5l, TimeUnit.SECONDS));
-        } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-        } finally {
-            executorService.shutdownNow();
-        }
-
-    }
+  }
 }
\ No newline at end of file
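
For context on the config convention exercised by testShouldGroupRelevantMonitorConfigTogether above: every setting for a monitor shares the monitor.<name>. prefix, and MonitorConfig.getMonitorConfigs splits a flat config into one MonitorConfig per monitor name. A minimal sketch follows, assuming MonitorConfig sits in the org.apache.samza.monitor package alongside the test; the monitor name, factory class, and interval value are illustrative only.

import com.google.common.collect.ImmutableMap;
import java.util.Map;
import org.apache.samza.config.MapConfig;
import org.apache.samza.monitor.MonitorConfig;

public class MonitorConfigGroupingSketch {
  public static void main(String[] args) {
    // Keys follow the monitor.<name>.<property> naming used in the test above.
    Map<String, String> raw = ImmutableMap.of(
        "monitor.diskSpaceMonitor.factory.class", "org.example.DiskSpaceMonitorFactory", // hypothetical factory
        "monitor.diskSpaceMonitor.scheduling.interval.ms", "60000");

    // Groups every monitor.<name>.* entry into one MonitorConfig keyed by <name>.
    Map<String, MonitorConfig> grouped = MonitorConfig.getMonitorConfigs(new MapConfig(raw));
    System.out.println(grouped.keySet()); // [diskSpaceMonitor]
  }
}
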
diff --git a/samza-rest/src/test/java/org/apache/samza/monitor/mock/DummyMonitor.java b/samza-rest/src/test/java/org/apache/samza/monitor/mock/DummyMonitor.java
index c6a2b28..1b28289 100644
--- a/samza-rest/src/test/java/org/apache/samza/monitor/mock/DummyMonitor.java
+++ b/samza-rest/src/test/java/org/apache/samza/monitor/mock/DummyMonitor.java
@@ -22,8 +22,8 @@
 
 public class DummyMonitor implements Monitor {
 
-    @Override
-    public void monitor() {
-        // Do nothing!
-    }
+  @Override
+  public void monitor() {
+    // Do nothing!
+  }
 }
diff --git a/samza-rest/src/test/java/org/apache/samza/monitor/mock/ExceptionThrowingMonitor.java b/samza-rest/src/test/java/org/apache/samza/monitor/mock/ExceptionThrowingMonitor.java
index 035e2ed..aa1bcda 100644
--- a/samza-rest/src/test/java/org/apache/samza/monitor/mock/ExceptionThrowingMonitor.java
+++ b/samza-rest/src/test/java/org/apache/samza/monitor/mock/ExceptionThrowingMonitor.java
@@ -24,8 +24,8 @@
 
 public class ExceptionThrowingMonitor implements Monitor {
 
-    @Override
-    public void monitor() throws IOException {
-        throw new IOException("I don't know what I was expecting.");
-    }
+  @Override
+  public void monitor() throws IOException {
+    throw new IOException("I don't know what I was expecting.");
+  }
 }
diff --git a/samza-rest/src/test/java/org/apache/samza/monitor/mock/InstantSchedulingProvider.java b/samza-rest/src/test/java/org/apache/samza/monitor/mock/InstantSchedulingProvider.java
index 6ae80e6..5c06911 100644
--- a/samza-rest/src/test/java/org/apache/samza/monitor/mock/InstantSchedulingProvider.java
+++ b/samza-rest/src/test/java/org/apache/samza/monitor/mock/InstantSchedulingProvider.java
@@ -25,10 +25,10 @@
  */
 public class InstantSchedulingProvider implements SchedulingProvider {
 
-    public void schedule(Runnable runnableMonitor, int interval) {
-        runnableMonitor.run();
-    }
+  public void schedule(Runnable runnableMonitor, int interval) {
+    runnableMonitor.run();
+  }
 
-    // Nothing to stop because no deferred task was started
-    public void stop() {}
+  // Nothing to stop because no deferred task was started
+  public void stop() {}
 }
diff --git a/samza-rest/src/test/java/org/apache/samza/rest/resources/BaseJerseyTest.java b/samza-rest/src/test/java/org/apache/samza/rest/resources/BaseJerseyTest.java
new file mode 100644
index 0000000..5cd768c
--- /dev/null
+++ b/samza-rest/src/test/java/org/apache/samza/rest/resources/BaseJerseyTest.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.samza.rest.resources;
+
+import org.glassfish.jersey.test.JerseyTest;
+import org.glassfish.jersey.test.TestProperties;
+
+
+public class BaseJerseyTest extends JerseyTest {
+
+  public BaseJerseyTest() {
+    // Configure the port as 0 so the Jersey test HTTP server picks an available port dynamically
+    forceSet(TestProperties.CONTAINER_PORT, "0");
+  }
+}
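
The two resource test classes below switch their base class to this new BaseJerseyTest. As a usage sketch only (not part of this patch), a hypothetical test against a trivial JAX-RS resource could look like the following; ExamplePingResourceTest and PingResource are invented names, and the code assumes the standard Jersey 2 test-framework API (configure() returning an Application, target() for issuing requests) that TestJobsResource and TestTasksResource already use.

package org.apache.samza.rest.resources;

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Application;
import javax.ws.rs.core.Response;
import org.glassfish.jersey.server.ResourceConfig;
import org.junit.Test;

import static org.junit.Assert.assertEquals;


public class ExamplePingResourceTest extends BaseJerseyTest {

  @Path("ping")
  public static class PingResource {
    @GET
    public String ping() {
      return "pong";
    }
  }

  @Override
  protected Application configure() {
    // BaseJerseyTest already forces the container port to 0, so an ephemeral port is chosen.
    return new ResourceConfig(PingResource.class);
  }

  @Test
  public void testPingReturns200() {
    Response resp = target("ping").request().get();
    assertEquals(200, resp.getStatus());
    assertEquals("pong", resp.readEntity(String.class));
    resp.close();
  }
}
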
diff --git a/samza-rest/src/test/java/org/apache/samza/rest/resources/TestJobsResource.java b/samza-rest/src/test/java/org/apache/samza/rest/resources/TestJobsResource.java
index 2a051c4..6e53d84 100644
--- a/samza-rest/src/test/java/org/apache/samza/rest/resources/TestJobsResource.java
+++ b/samza-rest/src/test/java/org/apache/samza/rest/resources/TestJobsResource.java
@@ -36,7 +36,6 @@
 import org.apache.samza.serializers.model.SamzaObjectMapper;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.codehaus.jackson.type.TypeReference;
-import org.glassfish.jersey.test.JerseyTest;
 import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
@@ -45,7 +44,7 @@
 import static org.junit.Assert.assertTrue;
 
 
-public class TestJobsResource extends JerseyTest {
+public class TestJobsResource extends BaseJerseyTest {
   ObjectMapper objectMapper = SamzaObjectMapper.getObjectMapper();
 
   @Override
@@ -126,7 +125,7 @@
     Response resp = target(String.format("v1/jobs/%s/%s", "BadJobName", MockJobProxy.JOB_INSTANCE_2_ID)).request().get();
     assertEquals(404, resp.getStatus());
 
-    final Map<String, String> errorMessage = objectMapper.readValue(resp.readEntity(String.class), new TypeReference<Map<String, String>>() {});
+    final Map<String, String> errorMessage = objectMapper.readValue(resp.readEntity(String.class), new TypeReference<Map<String, String>>() { });
     assertTrue(errorMessage.get("message"), errorMessage.get("message").contains("does not exist"));
     resp.close();
   }
@@ -137,7 +136,7 @@
     Response resp = target(String.format("v1/jobs/%s/%s", MockJobProxy.JOB_INSTANCE_2_NAME, "BadJobId")).request().get();
     assertEquals(404, resp.getStatus());
 
-    final Map<String, String> errorMessage = objectMapper.readValue(resp.readEntity(String.class), new TypeReference<Map<String, String>>() {});
+    final Map<String, String> errorMessage = objectMapper.readValue(resp.readEntity(String.class), new TypeReference<Map<String, String>>() { });
     assertTrue(errorMessage.get("message"), errorMessage.get("message").contains("does not exist"));
     resp.close();
   }
@@ -185,7 +184,7 @@
         .queryParam("status", "BADSTATUS").request().put(Entity.form(new Form()));
     assertEquals(400, resp.getStatus());
 
-    final Map<String, String> errorMessage = objectMapper.readValue(resp.readEntity(String.class), new TypeReference<Map<String, String>>() {});
+    final Map<String, String> errorMessage = objectMapper.readValue(resp.readEntity(String.class), new TypeReference<Map<String, String>>() { });
     assertTrue(errorMessage.get("message").contains("BADSTATUS"));
     resp.close();
   }
@@ -197,7 +196,7 @@
         .put(Entity.form(new Form()));
     assertEquals(400, resp.getStatus());
 
-    final Map<String, String> errorMessage = objectMapper.readValue(resp.readEntity(String.class), new TypeReference<Map<String, String>>() {});
+    final Map<String, String> errorMessage = objectMapper.readValue(resp.readEntity(String.class), new TypeReference<Map<String, String>>() { });
     assertTrue(errorMessage.get("message").contains("status"));
     resp.close();
   }
diff --git a/samza-rest/src/test/java/org/apache/samza/rest/resources/TestTasksResource.java b/samza-rest/src/test/java/org/apache/samza/rest/resources/TestTasksResource.java
index a9dbfcf..ecb54ad 100644
--- a/samza-rest/src/test/java/org/apache/samza/rest/resources/TestTasksResource.java
+++ b/samza-rest/src/test/java/org/apache/samza/rest/resources/TestTasksResource.java
@@ -35,14 +35,13 @@
 import org.apache.samza.serializers.model.SamzaObjectMapper;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.codehaus.jackson.type.TypeReference;
-import org.glassfish.jersey.test.JerseyTest;
 import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 
-public class TestTasksResource extends JerseyTest {
+public class TestTasksResource extends BaseJerseyTest {
   private ObjectMapper objectMapper = SamzaObjectMapper.getObjectMapper();
 
   @Override
@@ -79,7 +78,7 @@
     String requestUrl = String.format("v1/jobs/%s/%s/tasks", "BadJobName", MockJobProxy.JOB_INSTANCE_4_ID);
     Response resp = target(requestUrl).request().get();
     assertEquals(400, resp.getStatus());
-    final Map<String, String> errorMessage = objectMapper.readValue(resp.readEntity(String.class), new TypeReference<Map<String, String>>() {});
+    final Map<String, String> errorMessage = objectMapper.readValue(resp.readEntity(String.class), new TypeReference<Map<String, String>>() { });
     assertTrue(errorMessage.get("message"), errorMessage.get("message").contains("Invalid arguments for getTasks. "));
     resp.close();
   }
@@ -89,7 +88,7 @@
     String requestUrl = String.format("v1/jobs/%s/%s/tasks", MockJobProxy.JOB_INSTANCE_1_NAME, "BadJobId");
     Response resp = target(requestUrl).request().get();
     assertEquals(400, resp.getStatus());
-    final Map<String, String> errorMessage = objectMapper.readValue(resp.readEntity(String.class), new TypeReference<Map<String, String>>() {});
+    final Map<String, String> errorMessage = objectMapper.readValue(resp.readEntity(String.class), new TypeReference<Map<String, String>>() { });
     assertTrue(errorMessage.get("message"), errorMessage.get("message").contains("Invalid arguments for getTasks. "));
     resp.close();
   }
diff --git a/samza-rest/src/test/java/org/apache/samza/rest/resources/mock/MockJobProxyFactory.java b/samza-rest/src/test/java/org/apache/samza/rest/resources/mock/MockJobProxyFactory.java
index 03e95b1..eeb681d 100644
--- a/samza-rest/src/test/java/org/apache/samza/rest/resources/mock/MockJobProxyFactory.java
+++ b/samza-rest/src/test/java/org/apache/samza/rest/resources/mock/MockJobProxyFactory.java
@@ -23,7 +23,7 @@
 import org.apache.samza.rest.resources.JobsResourceConfig;
 
 
-public class MockJobProxyFactory implements JobProxyFactory{
+public class MockJobProxyFactory implements JobProxyFactory {
   @Override
   public JobProxy getJobProxy(JobsResourceConfig config) {
     return new MockJobProxy(config);
diff --git a/samza-rest/src/test/java/org/apache/samza/rest/resources/mock/MockJobStatusProvider.java b/samza-rest/src/test/java/org/apache/samza/rest/resources/mock/MockJobStatusProvider.java
index df0f18a..d02b88f 100644
--- a/samza-rest/src/test/java/org/apache/samza/rest/resources/mock/MockJobStatusProvider.java
+++ b/samza-rest/src/test/java/org/apache/samza/rest/resources/mock/MockJobStatusProvider.java
@@ -30,9 +30,9 @@
   @Override
   public void getJobStatuses(Collection<Job> jobs)
       throws IOException, InterruptedException {
-     for (Job info : jobs) {
-       setStatusStarted(info);
-     }
+    for (Job info : jobs) {
+      setStatusStarted(info);
+    }
   }
 
   @Override
diff --git a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/CliConstants.java b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/CliConstants.java
index ead0e9a..7b255d8 100755
--- a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/CliConstants.java
+++ b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/CliConstants.java
@@ -37,19 +37,19 @@
 
   public static final String WELCOME_MESSAGE;
   static {
-        WELCOME_MESSAGE =
-"      ___           ___           ___           ___           ___ \n" +
-"     /  /\\         /  /\\         /  /\\         /__/\\         /  /\\ \n" +
-"    /  /::\\       /  /::\\       /  /::|        \\  \\:\\       /  /::\\ \n"+
-"   /__/:/\\:\\     /  /:/\\:\\     /  /:|:|         \\  \\:\\     /  /:/\\:\\ \n"+
-"  _\\_ \\:\\ \\:\\   /  /::\\ \\:\\   /  /:/|:|__        \\  \\:\\   /  /::\\ \\:\\ \n"+
-" /__/\\ \\:\\ \\:\\ /__/:/\\:\\_\\:\\ /__/:/_|::::\\  ______\\__\\:\\ /__/:/\\:\\_\\:\\ \n"+
-" \\  \\:\\ \\:\\_\\/ \\__\\/  \\:\\/:/ \\__\\/  /~~/:/ \\  \\::::::::/ \\__\\/  \\:\\/:/ \n"+
-"  \\  \\:\\_\\:\\        \\__\\::/        /  /:/   \\  \\:\\~~~~~       \\__\\::/ \n"+
-"   \\  \\:\\/:/        /  /:/        /  /:/     \\  \\:\\           /  /:/ \n"+
-"    \\  \\::/        /__/:/        /__/:/       \\  \\:\\         /__/:/ \n"+
-"     \\__\\/         \\__\\/         \\__\\/         \\__\\/         \\__\\/  \n\n"+
-"Welcome to Samza SQL shell. Enter HELP for all commands.\n";
+    WELCOME_MESSAGE =
+      "      ___           ___           ___           ___           ___ \n" +
+      "     /  /\\         /  /\\         /  /\\         /__/\\         /  /\\ \n" +
+      "    /  /::\\       /  /::\\       /  /::|        \\  \\:\\       /  /::\\ \n" +
+      "   /__/:/\\:\\     /  /:/\\:\\     /  /:|:|         \\  \\:\\     /  /:/\\:\\ \n" +
+      "  _\\_ \\:\\ \\:\\   /  /::\\ \\:\\   /  /:/|:|__        \\  \\:\\   /  /::\\ \\:\\ \n" +
+      " /__/\\ \\:\\ \\:\\ /__/:/\\:\\_\\:\\ /__/:/_|::::\\  ______\\__\\:\\ /__/:/\\:\\_\\:\\ \n" +
+      " \\  \\:\\ \\:\\_\\/ \\__\\/  \\:\\/:/ \\__\\/  /~~/:/ \\  \\::::::::/ \\__\\/  \\:\\/:/ \n" +
+      "  \\  \\:\\_\\:\\        \\__\\::/        /  /:/   \\  \\:\\~~~~~       \\__\\::/ \n" +
+      "   \\  \\:\\/:/        /  /:/        /  /:/     \\  \\:\\           /  /:/ \n" +
+      "    \\  \\::/        /__/:/        /__/:/       \\  \\:\\         /__/:/ \n" +
+      "     \\__\\/         \\__\\/         \\__\\/         \\__\\/         \\__\\/  \n\n" +
+      "Welcome to Samza SQL shell. Enter HELP for all commands.\n";
   }
 
   public static final char SPACE = '\u0020';
diff --git a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/CliEnvironment.java b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/CliEnvironment.java
index 35d3626..496afd9 100644
--- a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/CliEnvironment.java
+++ b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/CliEnvironment.java
@@ -69,7 +69,7 @@
    */
   public int setEnvironmentVariable(String name, String value) throws ExecutorException, CommandHandlerException {
     name = name.toLowerCase();
-    if(name.equals(CliConstants.CONFIG_EXECUTOR)) {
+    if (name.equals(CliConstants.CONFIG_EXECUTOR)) {
       createShellExecutor(value);
       activeExecutorClassName = value;
       executorEnvHandler = executor.getEnvironmentVariableHandler();
@@ -85,9 +85,9 @@
     }
 
     EnvironmentVariableHandler handler = getAppropriateHandler(name);
-    if(handler == null) {
+    if (handler == null) {
       // Shell doesn't recognize this variable. There's no executor handler yet. Save for future executor
-      if(delayedExecutorVars == null) {
+      if (delayedExecutorVars == null) {
         delayedExecutorVars = new HashMap<>();
       }
       delayedExecutorVars.put(name, value);
@@ -103,12 +103,12 @@
    * @return value of the environment variable. Returns null if the variable is not set.
    */
   public String getEnvironmentVariable(String name) {
-    if(name.equalsIgnoreCase(CliConstants.CONFIG_EXECUTOR)) {
+    if (name.equalsIgnoreCase(CliConstants.CONFIG_EXECUTOR)) {
       return activeExecutorClassName;
     }
 
     EnvironmentVariableHandler handler = getAppropriateHandler(name);
-    if(handler == null)
+    if (handler == null)
       return null;
 
     return handler.getEnvironmentVariable(name);
@@ -120,10 +120,9 @@
    * @return An array of all possible valid values of the environment variable. Returns null
    * if the environment variable name given is invalid.
    */
-  public String[] getPossibleValues(String name)
-  {
+  public String[] getPossibleValues(String name) {
     EnvironmentVariableHandler handler = getAppropriateHandler(name);
-    if(handler == null)
+    if (handler == null)
       return null;
 
     EnvironmentVariableSpecs.Spec spec = handler.getSpecs().getSpec(name);
@@ -138,7 +137,7 @@
   public void printAll(PrintWriter writer) {
     printVariable(writer, CliConstants.CONFIG_EXECUTOR, activeExecutorClassName);
     printAll(writer, shellEnvHandler);
-    if(executorEnvHandler != null) {
+    if (executorEnvHandler != null) {
       printAll(writer, executorEnvHandler);
     }
   }
@@ -148,7 +147,7 @@
    * making default values take effect
    */
   public void finishInitialization() throws CliException {
-    if(executor == null) {
+    if (executor == null) {
       try {
         createShellExecutor(CliConstants.DEFAULT_EXECUTOR_CLASS);
         activeExecutorClassName = CliConstants.DEFAULT_EXECUTOR_CLASS;
@@ -175,10 +174,10 @@
    */
   private void finishInitialization(EnvironmentVariableHandler handler) {
     List<Pair<String, EnvironmentVariableSpecs.Spec>> list = handler.getSpecs().getAllSpecs();
-    for(Pair<String, EnvironmentVariableSpecs.Spec> pair : list) {
+    for (Pair<String, EnvironmentVariableSpecs.Spec> pair : list) {
       String name = pair.getL();
       EnvironmentVariableSpecs.Spec spec = pair.getR();
-      if(CliUtil.isNullOrEmpty(handler.getEnvironmentVariable(name))) {
+      if (CliUtil.isNullOrEmpty(handler.getEnvironmentVariable(name))) {
         handler.setEnvironmentVariable(name, spec.getDefaultValue());
       }
     }
@@ -196,7 +195,9 @@
    * retrieves the list of {@link CommandHandler}s setup in this environment
    * @return List of {@link CommandHandler}
    */
-  public List<CommandHandler> getCommandHandlers() { return commandHandlers; }
+  public List<CommandHandler> getCommandHandlers() {
+    return commandHandlers;
+  }
 
   private void createShellExecutor(String executorClassName) throws ExecutorException {
     try {
@@ -213,7 +214,7 @@
 
   private void createCommandHandler(String handlerClassName) throws CommandHandlerException {
     try {
-    commandHandlers.add((CommandHandler) createInstance(handlerClassName));
+      commandHandlers.add((CommandHandler) createInstance(handlerClassName));
     } catch (ClassCastException e) {
       String errMsg = String.format("Error trying to cast Object of class %s to CommandHandler", handlerClassName);
       LOG.error(errMsg);
@@ -253,7 +254,7 @@
    */
   private void printAll(PrintWriter writer, EnvironmentVariableHandler handler) {
     List<Pair<String, String>> shellEnvs = handler.getAllEnvironmentVariables();
-    for(Pair<String, String> pair : shellEnvs) {
+    for (Pair<String, String> pair : shellEnvs) {
       printVariable(writer, pair.getL(), pair.getR());
     }
   }
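
As a small usage sketch (not part of this change), the CliEnvironment surface touched above can be driven directly; the variable name and value below are hypothetical, and the -1/-2 return codes follow the interpretation in Main.java later in this patch.

package org.apache.samza.sql.client.cli;

import java.io.PrintWriter;

public class CliEnvironmentSketch {
  public static void main(String[] args) throws Exception {
    CliEnvironment env = new CliEnvironment();

    // Per Main.java: 0 means accepted, -1 an unrecognized variable, -2 an invalid value.
    int result = env.setEnvironmentVariable("some.shell.variable", "someValue"); // hypothetical key
    if (result == -1) {
      System.out.println("Variable not recognized by the shell");
    } else if (result == -2) {
      System.out.println("Invalid value for the variable");
    }

    // Creates the default executor and fills in defaults for anything left unset.
    env.finishInitialization();

    // Print the effective environment, including executor variables.
    env.printAll(new PrintWriter(System.out, true));
  }
}
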
diff --git a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/CliHighlighter.java b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/CliHighlighter.java
index c6ac6c7..81c7e50 100755
--- a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/CliHighlighter.java
+++ b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/CliHighlighter.java
@@ -34,12 +34,12 @@
  * A primitive highlighter.
  */
 public class CliHighlighter implements Highlighter {
-  private static final List<String> keywords;
+  private static final List<String> KEYWORDS;
 
   static {
-    keywords = CliCommandType.getAllCommands();
-    keywords.add("FROM");
-    keywords.add("WHERE");
+    KEYWORDS = CliCommandType.getAllCommands();
+    KEYWORDS.add("FROM");
+    KEYWORDS.add("WHERE");
   }
 
   private static List<String> splitWithSpace(String buffer) {
@@ -80,7 +80,7 @@
   }
 
   private boolean isKeyword(String token) {
-    for (String keyword : keywords) {
+    for (String keyword : KEYWORDS) {
       if (keyword.compareToIgnoreCase(token) == 0)
         return true;
     }
diff --git a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/CliShell.java b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/CliShell.java
index 51bb429..fb3e09a 100755
--- a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/CliShell.java
+++ b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/CliShell.java
@@ -128,7 +128,7 @@
     clearScreen();
     writer.write(CliConstants.WELCOME_MESSAGE);
     printVersion();
-    if(!CliUtil.isNullOrEmpty(message)) {
+    if (!CliUtil.isNullOrEmpty(message)) {
       writer.println(message);
     }
     writer.println();
diff --git a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/Main.java b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/Main.java
index 6ee2cf3..b2a5b95 100755
--- a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/Main.java
+++ b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/Main.java
@@ -33,77 +33,77 @@
  * Main entry of the program.
  */
 public class Main {
-    private static final Logger LOG = LoggerFactory.getLogger(Main.class);
+  private static final Logger LOG = LoggerFactory.getLogger(Main.class);
 
-    public static void main(String[] args) {
-      // Get configuration file path
-      String configFilePath = null;
-      for(int i = 0; i < args.length; ++i) {
-        switch(args[i]) {
-          case "-conf":
-            if(i + 1 < args.length) {
-              configFilePath = args[i + 1];
-              i++;
-            }
-            break;
-          default:
-            LOG.warn("Unknown parameter {}", args[i]);
-            break;
-        }
-      }
-
-      CliEnvironment environment = new CliEnvironment();
-      StringBuilder messageBuilder = new StringBuilder();
-
-      if(!CliUtil.isNullOrEmpty(configFilePath)) {
-        LOG.info("Configuration file path is: {}", configFilePath);
-        try {
-          FileReader fileReader = new FileReader(configFilePath);
-          BufferedReader bufferedReader = new BufferedReader(fileReader);
-          String line;
-          while ((line = bufferedReader.readLine()) != null) {
-            if (line.startsWith("#") || line.startsWith("[")) {
-              continue;
-            }
-            String[] strs = line.split("=");
-            if (strs.length != 2) {
-              continue;
-            }
-            String key = strs[0].trim().toLowerCase();
-            String value = strs[1].trim();
-            try {
-              LOG.info("Configuration: setting {} = {}", key, value);
-              int result = environment.setEnvironmentVariable(key, value);
-              if (result == -1) { // CliEnvironment doesn't recognize the key.
-                LOG.warn("Unknowing shell environment variable: {}", key);
-              } else if (result == -2) { // Invalid value
-                LOG.warn("Unknowing shell environment value: {}", value);
-              }
-            } catch(ExecutorException e) {
-              messageBuilder.append("Warning: Failed to create executor: ").append(value).append('\n');
-              messageBuilder.append("Warning: Using default executor " + CliConstants.DEFAULT_EXECUTOR_CLASS);
-              LOG.error("Failed to create user specified executor {}", value, e);
-            } catch (CommandHandlerException e) {
-              messageBuilder.append("Warning: Failed to create CommandHandler: ").append(value).append('\n');
-              LOG.error("Failed to create user specified CommandHandler {}", value, e);
-            }
+  public static void main(String[] args) {
+    // Get configuration file path
+    String configFilePath = null;
+    for (int i = 0; i < args.length; ++i) {
+      switch (args[i]) {
+        case "-conf":
+          if (i + 1 < args.length) {
+            configFilePath = args[i + 1];
+            i++;
           }
-        } catch (IOException e) {
-          LOG.error("Error in opening and reading the configuration file {}", e.toString());
-        }
+          break;
+        default:
+          LOG.warn("Unknown parameter {}", args[i]);
+          break;
       }
-
-      environment.finishInitialization();
-      CliShell shell;
-      try {
-        shell = new CliShell(environment);
-      } catch (ExecutorException e) {
-        System.out.println("Unable to initialize executor. Shell must exit. ");
-        LOG.error("Unable to initialize executor.", e);
-        return;
-      }
-
-      shell.open(messageBuilder.toString());
     }
+
+    CliEnvironment environment = new CliEnvironment();
+    StringBuilder messageBuilder = new StringBuilder();
+
+    if (!CliUtil.isNullOrEmpty(configFilePath)) {
+      LOG.info("Configuration file path is: {}", configFilePath);
+      try {
+        FileReader fileReader = new FileReader(configFilePath);
+        BufferedReader bufferedReader = new BufferedReader(fileReader);
+        String line;
+        while ((line = bufferedReader.readLine()) != null) {
+          if (line.startsWith("#") || line.startsWith("[")) {
+            continue;
+          }
+          String[] strs = line.split("=");
+          if (strs.length != 2) {
+            continue;
+          }
+          String key = strs[0].trim().toLowerCase();
+          String value = strs[1].trim();
+          try {
+            LOG.info("Configuration: setting {} = {}", key, value);
+            int result = environment.setEnvironmentVariable(key, value);
+            if (result == -1) { // CliEnvironment doesn't recognize the key.
+              LOG.warn("Unknowing shell environment variable: {}", key);
+            } else if (result == -2) { // Invalid value
+              LOG.warn("Unknowing shell environment value: {}", value);
+            }
+          } catch (ExecutorException e) {
+            messageBuilder.append("Warning: Failed to create executor: ").append(value).append('\n');
+            messageBuilder.append("Warning: Using default executor " + CliConstants.DEFAULT_EXECUTOR_CLASS);
+            LOG.error("Failed to create user specified executor {}", value, e);
+          } catch (CommandHandlerException e) {
+            messageBuilder.append("Warning: Failed to create CommandHandler: ").append(value).append('\n');
+            LOG.error("Failed to create user specified CommandHandler {}", value, e);
+          }
+        }
+      } catch (IOException e) {
+        LOG.error("Error in opening and reading the configuration file {}", e.toString());
+      }
+    }
+
+    environment.finishInitialization();
+    CliShell shell;
+    try {
+      shell = new CliShell(environment);
+    } catch (ExecutorException e) {
+      System.out.println("Unable to initialize executor. Shell must exit. ");
+      LOG.error("Unable to initialize executor.", e);
+      return;
+    }
+
+    shell.open(messageBuilder.toString());
+  }
 }
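
For reference, the -conf parsing above ignores lines starting with '#' or '[', keeps only single key=value pairs, and lowercases each key before handing it to CliEnvironment.setEnvironmentVariable. A minimal sketch of an acceptable file, with purely illustrative variable names:

# lines starting with '#' or '[' are skipped
[shell]
some.shell.variable=value1
another.variable=value2
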
 
diff --git a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/QueryResultLogView.java b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/QueryResultLogView.java
index 6a8c787..8b43066 100644
--- a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/QueryResultLogView.java
+++ b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/cli/QueryResultLogView.java
@@ -62,7 +62,7 @@
 
   // -- implementation of CliView -------------------------------------------
 
-  public void open(CliShell shell, QueryResult queryResult) throws ExecutorException{
+  public void open(CliShell shell, QueryResult queryResult) throws ExecutorException {
     terminal = shell.getTerminal();
     executor = shell.getExecutor();
     exeContext = shell.getExeContext();
@@ -98,7 +98,7 @@
 
   // ------------------------------------------------------------------------
 
-  private void display() throws ExecutorException{
+  private void display() throws ExecutorException {
     updateTerminalSize();
     int rowsInBuffer = executor.getRowCount();
     if (rowsInBuffer <= 0 || paused) {
@@ -164,11 +164,11 @@
     TerminalStatus prevStatus = new TerminalStatus();
 
     // Signal handlers
-    prevStatus.handler_INT = terminal.handle(Terminal.Signal.INT, this::handleSignal);
-    prevStatus.handler_QUIT = terminal.handle(Terminal.Signal.QUIT, this::handleSignal);
-    prevStatus.handler_TSTP = terminal.handle(Terminal.Signal.TSTP, this::handleSignal);
-    prevStatus.handler_CONT = terminal.handle(Terminal.Signal.CONT, this::handleSignal);
-    prevStatus.handler_WINCH = terminal.handle(Terminal.Signal.WINCH, this::handleSignal);
+    prevStatus.handlerInt = terminal.handle(Terminal.Signal.INT, this::handleSignal);
+    prevStatus.handlerQuit = terminal.handle(Terminal.Signal.QUIT, this::handleSignal);
+    prevStatus.handlerTstp = terminal.handle(Terminal.Signal.TSTP, this::handleSignal);
+    prevStatus.handlerCont = terminal.handle(Terminal.Signal.CONT, this::handleSignal);
+    prevStatus.handlerWinch = terminal.handle(Terminal.Signal.WINCH, this::handleSignal);
 
     // Attributes
     prevStatus.attributes = terminal.getAttributes();
@@ -208,11 +208,11 @@
 
   private void restoreTerminal(TerminalStatus status) {
     // Signal handlers
-    terminal.handle(Terminal.Signal.INT, status.handler_INT);
-    terminal.handle(Terminal.Signal.QUIT, status.handler_QUIT);
-    terminal.handle(Terminal.Signal.TSTP, status.handler_TSTP);
-    terminal.handle(Terminal.Signal.CONT, status.handler_CONT);
-    terminal.handle(Terminal.Signal.WINCH, status.handler_WINCH);
+    terminal.handle(Terminal.Signal.INT, status.handlerInt);
+    terminal.handle(Terminal.Signal.QUIT, status.handlerQuit);
+    terminal.handle(Terminal.Signal.TSTP, status.handlerTstp);
+    terminal.handle(Terminal.Signal.CONT, status.handlerCont);
+    terminal.handle(Terminal.Signal.WINCH, status.handlerWinch);
 
     // Attributes
     terminal.setAttributes(status.attributes);
@@ -259,11 +259,11 @@
   }
 
   private static class TerminalStatus {
-    Terminal.SignalHandler handler_INT;
-    Terminal.SignalHandler handler_QUIT;
-    Terminal.SignalHandler handler_TSTP;
-    Terminal.SignalHandler handler_CONT;
-    Terminal.SignalHandler handler_WINCH;
+    Terminal.SignalHandler handlerInt;
+    Terminal.SignalHandler handlerQuit;
+    Terminal.SignalHandler handlerTstp;
+    Terminal.SignalHandler handlerCont;
+    Terminal.SignalHandler handlerWinch;
 
     Attributes attributes;
   }
diff --git a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/impl/CliCommandHandler.java b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/impl/CliCommandHandler.java
index 4a89601..6ebfe8d 100644
--- a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/impl/CliCommandHandler.java
+++ b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/impl/CliCommandHandler.java
@@ -264,11 +264,11 @@
     }
     String[] params = null;
     boolean syntaxValid = param.split(" ").length == 1;
-    if(syntaxValid) {
+    if (syntaxValid) {
       params = param.split("=");
-      if(params.length == 1) {
+      if (params.length == 1) {
         String value = env.getEnvironmentVariable(param);
-        if(!CliUtil.isNullOrEmpty(value)) {
+        if (!CliUtil.isNullOrEmpty(value)) {
           env.printVariable(writer, param, value);
         }
         return;
@@ -276,7 +276,7 @@
         syntaxValid = params.length == 2;
       }
     }
-    if(!syntaxValid) {
+    if (!syntaxValid) {
       writer.println(command.getCommandType().getUsage());
       writer.flush();
       return;
@@ -289,7 +289,7 @@
       writer.print(name);
       writer.print(" set to ");
       writer.println(value);
-      if(name.equals(CliConstants.CONFIG_EXECUTOR)) {
+      if (name.equals(CliConstants.CONFIG_EXECUTOR)) {
         executor.stop(exeContext);
         executor = env.getExecutor();
         executor.start(exeContext);
@@ -301,7 +301,7 @@
       writer.print("Invalid value: ");
       writer.print(value);
       String[] vals = env.getPossibleValues(name);
-      if(vals != null && vals.length != 0) {
+      if (vals != null && vals.length != 0) {
         writer.print(" Possible values:");
         for (String s : vals) {
           writer.print(CliConstants.SPACE);
@@ -314,7 +314,7 @@
     writer.flush();
   }
 
-  private void commandExecuteFile(CliCommand command) throws ExecutorException{
+  private void commandExecuteFile(CliCommand command) throws ExecutorException {
     String fullCmdStr = command.getFullCommand();
     String parameters = command.getParameters();
     URI uri = null;
@@ -386,9 +386,9 @@
 
     execIds.sort(Integer::compareTo);
     final int terminalWidth = terminal.getWidth();
-    final int ID_WIDTH = 3;
-    final int STATUS_WIDTH = 20;
-    final int CMD_WIDTH = terminalWidth - ID_WIDTH - STATUS_WIDTH - 4;
+    final int idWidth = 3;
+    final int statusWidth = 20;
+    final int cmdWidth = terminalWidth - idWidth - statusWidth - 4;
 
     AttributedStyle oddLineStyle = AttributedStyle.DEFAULT.BOLD.foreground(AttributedStyle.BLUE);
     AttributedStyle evenLineStyle = AttributedStyle.DEFAULT.BOLD.foreground(AttributedStyle.CYAN);
@@ -414,12 +414,12 @@
         if (cmdStartIdx == 0) {
           line.append(CliConstants.SPACE);
           line.append(id);
-          CliUtil.appendTo(line, 1 + ID_WIDTH + 1, CliConstants.SPACE);
+          CliUtil.appendTo(line, 1 + idWidth + 1, CliConstants.SPACE);
           line.append(status);
         }
-        CliUtil.appendTo(line, 1 + ID_WIDTH + 1 + STATUS_WIDTH + 1, CliConstants.SPACE);
+        CliUtil.appendTo(line, 1 + idWidth + 1 + statusWidth + 1, CliConstants.SPACE);
 
-        int numToWrite = Math.min(CMD_WIDTH, cmdLength - cmdStartIdx);
+        int numToWrite = Math.min(cmdWidth, cmdLength - cmdStartIdx);
         if (numToWrite > 0) {
           line.append(cmd, cmdStartIdx, cmdStartIdx + numToWrite);
           cmdStartIdx += numToWrite;
@@ -460,7 +460,7 @@
     writer.flush();
   }
 
-  private void commandSelect(CliCommand command) throws ExecutorException{
+  private void commandSelect(CliCommand command) throws ExecutorException {
     QueryResult queryResult = executor.executeQuery(exeContext, command.getFullCommand());
     CliView view = new QueryResultLogView();
     view.open(shell, queryResult);
@@ -554,23 +554,23 @@
       -------------------------
   */
   private List<String> formatSchema4Display(SqlSchema schema) {
-    final String HEADER_FIELD = "Field";
-    final String HEADER_TYPE = "Type";
-    final char SEPERATOR = '|';
-    final char LINE_SEP = '-';
+    final String headerField = "Field";
+    final String headerType = "Type";
+    final char seperator = '|';
+    final char lineSep = '-';
 
     int terminalWidth = terminal.getWidth();
     // Two spaces * 2 plus one SEPERATOR
-    if (terminalWidth < 2 + 2 + 1 + HEADER_FIELD.length() + HEADER_TYPE.length()) {
+    if (terminalWidth < 2 + 2 + 1 + headerField.length() + headerType.length()) {
       return Collections.singletonList("Not enough room.");
     }
 
     // Find the best seperator position for least rows
-    int seperatorPos = HEADER_FIELD.length() + 2;
+    int seperatorPos = headerField.length() + 2;
     int minRowNeeded = Integer.MAX_VALUE;
     int longestLineCharNum = 0;
     int rowCount = schema.getFields().size();
-    for (int j = seperatorPos; j < terminalWidth - HEADER_TYPE.length() - 2; ++j) {
+    for (int j = seperatorPos; j < terminalWidth - headerType.length() - 2; ++j) {
       boolean fieldWrapped = false;
       int rowNeeded = 0;
       for (int i = 0; i < rowCount; ++i) {
@@ -601,14 +601,14 @@
     // Header
     StringBuilder line = new StringBuilder(terminalWidth);
     line.append(CliConstants.SPACE);
-    line.append(HEADER_FIELD);
+    line.append(headerField);
     CliUtil.appendTo(line, seperatorPos - 1, CliConstants.SPACE);
-    line.append(SEPERATOR);
+    line.append(seperator);
     line.append(CliConstants.SPACE);
-    line.append(HEADER_TYPE);
+    line.append(headerType);
     lines.add(line.toString());
     line = new StringBuilder(terminalWidth);
-    CliUtil.appendTo(line, longestLineCharNum - 1, LINE_SEP);
+    CliUtil.appendTo(line, longestLineCharNum - 1, lineSep);
     lines.add(line.toString());
 
     // Body
@@ -633,7 +633,7 @@
           fieldStartIdx += numToWrite;
         }
         CliUtil.appendTo(line, seperatorPos - 1, CliConstants.SPACE);
-        line.append(SEPERATOR);
+        line.append(seperator);
         line.append(CliConstants.SPACE);
 
         numToWrite = Math.min(typeColSize, typeLen - typeStartIdx);
@@ -656,7 +656,7 @@
 
     // Footer
     line = new StringBuilder(terminalWidth);
-    CliUtil.appendTo(line, longestLineCharNum - 1, LINE_SEP);
+    CliUtil.appendTo(line, longestLineCharNum - 1, lineSep);
     lines.add(line.toString());
     return lines;
   }
diff --git a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/impl/SamzaExecutor.java b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/impl/SamzaExecutor.java
index e97a327..6770f93 100755
--- a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/impl/SamzaExecutor.java
+++ b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/impl/SamzaExecutor.java
@@ -19,7 +19,6 @@
 
 package org.apache.samza.sql.client.impl;
 
-import com.google.common.base.Joiner;
 import kafka.utils.ZkUtils;
 import org.I0Itec.zkclient.ZkClient;
 import org.I0Itec.zkclient.ZkConnection;
@@ -38,8 +37,6 @@
 import org.apache.samza.sql.client.util.RandomAccessQueue;
 import org.apache.samza.sql.dsl.SamzaSqlDslConverter;
 import org.apache.samza.sql.dsl.SamzaSqlDslConverterFactory;
-import org.apache.samza.sql.fn.FlattenUdf;
-import org.apache.samza.sql.fn.RegexMatchUdf;
 import org.apache.samza.sql.impl.ConfigBasedIOResolverFactory;
 import org.apache.samza.sql.interfaces.RelSchemaProvider;
 import org.apache.samza.sql.interfaces.RelSchemaProviderFactory;
@@ -119,7 +116,7 @@
   @Override
   public List<String> listTables(ExecutionContext context) throws ExecutorException {
     String address = environmentVariableHandler.getEnvironmentVariable(SAMZA_SQL_SYSTEM_KAFKA_ADDRESS);
-    if(address == null || address.isEmpty()) {
+    if (address == null || address.isEmpty()) {
       address = DEFAULT_SERVER_ADDRESS;
     }
     try {
@@ -147,9 +144,9 @@
       SqlIOResolver ioResolver = SamzaSqlApplicationConfig.createIOResolver(samzaSqlConfig);
       SqlIOConfig sourceInfo = ioResolver.fetchSourceInfo(tableName);
       RelSchemaProvider schemaProvider =
-              SamzaSqlApplicationConfig.initializePlugin("RelSchemaProvider", sourceInfo.getRelSchemaProviderName(),
-                      samzaSqlConfig, SamzaSqlApplicationConfig.CFG_FMT_REL_SCHEMA_PROVIDER_DOMAIN,
-                      (o, c) -> ((RelSchemaProviderFactory) o).create(sourceInfo.getSystemStream(), c));
+        SamzaSqlApplicationConfig.initializePlugin("RelSchemaProvider", sourceInfo.getRelSchemaProviderName(),
+          samzaSqlConfig, SamzaSqlApplicationConfig.CFG_FMT_REL_SCHEMA_PROVIDER_DOMAIN,
+          (o, c) -> ((RelSchemaProviderFactory) o).create(sourceInfo.getSystemStream(), c));
       sqlSchema =  schemaProvider.getSqlSchema();
     } catch (SamzaException ex) {
       throw new ExecutorException(ex);
@@ -158,7 +155,7 @@
   }
 
   @Override
-  public QueryResult executeQuery(ExecutionContext context, String statement) throws ExecutorException{
+  public QueryResult executeQuery(ExecutionContext context, String statement) throws ExecutorException {
     outputData.clear();
 
     int execId = execIdSeq.incrementAndGet();
@@ -206,7 +203,7 @@
   }
 
   @Override
-  public NonQueryResult executeNonQuery(ExecutionContext context, File sqlFile) throws ExecutorException{
+  public NonQueryResult executeNonQuery(ExecutionContext context, File sqlFile) throws ExecutorException {
     LOG.info("Sql file path: " + sqlFile.getPath());
     List<String> executedStmts;
     try {
@@ -246,7 +243,7 @@
   }
 
   @Override
-  public void stopExecution(ExecutionContext context, int exeId) throws ExecutorException{
+  public void stopExecution(ExecutionContext context, int exeId) throws ExecutorException {
     SamzaSqlApplicationRunner runner = executions.get(exeId);
     if (runner != null) {
       LOG.debug("Stopping execution ", exeId);
@@ -376,7 +373,7 @@
         "/tmp/schemas/");
 
     List<Pair<String, String>> allEnvironmentVariables = environmentVariableHandler.getAllEnvironmentVariables();
-    for(Pair<String, String> p : allEnvironmentVariables) {
+    for (Pair<String, String> p : allEnvironmentVariables) {
       staticConfigs.put(p.getL(), p.getR());
     }
 
@@ -448,7 +445,7 @@
       case UnsuccessfulFinish:
         return ExecutionStatus.UnsuccessfulFinish;
     }
-    throw new ExecutorException("Unsupported status code: "+ code);
+    throw new ExecutorException("Unsupported status code: " + code);
   }
 
   private String getPrettyFormat(OutgoingMessageEnvelope envelope) {
diff --git a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/interfaces/CommandType.java b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/interfaces/CommandType.java
index 6e91d35..08a9d9f 100644
--- a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/interfaces/CommandType.java
+++ b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/interfaces/CommandType.java
@@ -31,7 +31,9 @@
   /**
    * @return list of names of all commands in this enumeration
    */
-  static List<String> getAllCommands() { return new ArrayList<>(); }
+  static List<String> getAllCommands() {
+    return new ArrayList<>();
+  }
 
   /**
    * returns the name of the command
diff --git a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/interfaces/EnvironmentVariableHandlerImpl.java b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/interfaces/EnvironmentVariableHandlerImpl.java
index 141d8d2..5a231db 100644
--- a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/interfaces/EnvironmentVariableHandlerImpl.java
+++ b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/interfaces/EnvironmentVariableHandlerImpl.java
@@ -24,7 +24,7 @@
 
 import java.util.*;
 
-public abstract class EnvironmentVariableHandlerImpl implements EnvironmentVariableHandler{
+public abstract class EnvironmentVariableHandlerImpl implements EnvironmentVariableHandler {
   private EnvironmentVariableSpecs specs;
   protected Map<String, String> envVars = new HashMap<>();
 
@@ -35,8 +35,8 @@
   @Override
   public int setEnvironmentVariable(String name, String value) {
     EnvironmentVariableSpecs.Spec spec = specs.getSpec(name);
-    if(spec == null) {
-      if(isAcceptUnknowName()) {
+    if (spec == null) {
+      if (isAcceptUnknowName()) {
         return setEnvironmentVariableHelper(name, value);
       } else {
         return -1;
@@ -50,7 +50,7 @@
 
     for (String s : possibleValues) {
       if (s.equalsIgnoreCase(value)) {
-        if(!processEnvironmentVariable(name, value)) {
+        if (!processEnvironmentVariable(name, value)) {
           throw new CliException(); // should not reach here
         }
         envVars.put(name, value);
@@ -69,7 +69,7 @@
   public List<Pair<String, String>> getAllEnvironmentVariables() {
     List<Pair<String, String>> list = new ArrayList<>();
     Iterator<Map.Entry<String, String>> it =  envVars.entrySet().iterator();
-    while(it.hasNext()) {
+    while (it.hasNext()) {
       Map.Entry<String, String> entry = it.next();
       list.add(new Pair<>(entry.getKey(), entry.getValue()));
     }
@@ -106,7 +106,7 @@
   }
 
   private int setEnvironmentVariableHelper(String name, String value) {
-    if(processEnvironmentVariable(name, value)) {
+    if (processEnvironmentVariable(name, value)) {
       envVars.put(name, value);
       return 0;
     } else {
diff --git a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/interfaces/EnvironmentVariableSpecs.java b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/interfaces/EnvironmentVariableSpecs.java
index 4d5060f..dd2b60f 100644
--- a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/interfaces/EnvironmentVariableSpecs.java
+++ b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/interfaces/EnvironmentVariableSpecs.java
@@ -53,7 +53,7 @@
   public List<Pair<String, Spec>> getAllSpecs() {
     List<Pair<String, Spec>> list = new ArrayList<>();
     Iterator<Map.Entry<String, Spec>> it =  specMap.entrySet().iterator();
-    while(it.hasNext()) {
+    while (it.hasNext()) {
       Map.Entry<String, Spec> entry = it.next();
       list.add(new Pair<>(entry.getKey(), entry.getValue()));
     }
diff --git a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/interfaces/ExecutionContext.java b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/interfaces/ExecutionContext.java
index ef7caf6..a6155f4 100644
--- a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/interfaces/ExecutionContext.java
+++ b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/interfaces/ExecutionContext.java
@@ -19,8 +19,6 @@
 
 package org.apache.samza.sql.client.interfaces;
 
-import java.util.Map;
-
 /**
  * Whenever the shell calls the executor to execute a SQL statement, an object of ExecutionContext is passed.
  */
diff --git a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/util/Pair.java b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/util/Pair.java
index 7d9c2f5..aa19633 100644
--- a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/util/Pair.java
+++ b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/util/Pair.java
@@ -19,20 +19,20 @@
 
 package org.apache.samza.sql.client.util;
 
-public class Pair<L,R> {
+public class Pair<L, R> {
   private L l;
   private R r;
 
-  public Pair(L l, R r){
+  public Pair(L l, R r) {
     this.l = l;
     this.r = r;
   }
 
-  public L getL(){
+  public L getL() {
     return l;
   }
 
-  public R getR(){
+  public R getR() {
     return r;
   }
 
@@ -40,7 +40,7 @@
     this.l = l;
   }
 
-  public void setR(R r){
+  public void setR(R r) {
     this.r = r;
   }
 }
\ No newline at end of file
diff --git a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/util/RandomAccessQueue.java b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/util/RandomAccessQueue.java
index a41021d..370d9d0 100644
--- a/samza-sql-shell/src/main/java/org/apache/samza/sql/client/util/RandomAccessQueue.java
+++ b/samza-sql-shell/src/main/java/org/apache/samza/sql/client/util/RandomAccessQueue.java
@@ -79,7 +79,7 @@
     List<T> rets = get(start, end);
     int upperBound = Math.min(end, size - 1);
     head = (end + 1) % capacity;
-    size -= (upperBound + 1);
+    size -= upperBound + 1;
     return rets;
   }
 
diff --git a/samza-sql-shell/src/test/java/org/apache/samza/sql/client/impl/SamzaExecutorTest.java b/samza-sql-shell/src/test/java/org/apache/samza/sql/client/impl/SamzaExecutorTest.java
index ac2d45d..067fafb 100644
--- a/samza-sql-shell/src/test/java/org/apache/samza/sql/client/impl/SamzaExecutorTest.java
+++ b/samza-sql-shell/src/test/java/org/apache/samza/sql/client/impl/SamzaExecutorTest.java
@@ -35,50 +35,51 @@
 
 
 public class SamzaExecutorTest {
-    private SamzaExecutor m_executor = new SamzaExecutor();
+  private SamzaExecutor mExecutor = new SamzaExecutor();
 
-    @Test
-    public void testGetTableSchema() throws ExecutorException {
-        prepareEnvironmentVariable();
-        SqlSchema ts = m_executor.getTableSchema(new ExecutionContext(), "kafka.ProfileChangeStream");
+  @Test
+  public void testGetTableSchema() throws ExecutorException {
+    prepareEnvironmentVariable();
+    SqlSchema ts = mExecutor.getTableSchema(new ExecutionContext(), "kafka.ProfileChangeStream");
 
-        List<SqlSchema.SqlField> fields = ts.getFields();
-        Assert.assertEquals("Name", fields.get(0).getFieldName());
-        Assert.assertEquals("NewCompany", fields.get(1).getFieldName());
-        Assert.assertEquals("OldCompany", fields.get(2).getFieldName());
-        Assert.assertEquals("ProfileChangeTimestamp", fields.get(3).getFieldName());
-        Assert.assertEquals("STRING", fields.get(0).getFieldSchema().getFieldType().toString());
-        Assert.assertEquals("STRING", fields.get(1).getFieldSchema().getFieldType().toString());
-        Assert.assertEquals("STRING", fields.get(2).getFieldSchema().getFieldType().toString());
-        Assert.assertEquals("INT64", fields.get(3).getFieldSchema().getFieldType().toString());
-    }
+    List<SqlSchema.SqlField> fields = ts.getFields();
+    Assert.assertEquals("Name", fields.get(0).getFieldName());
+    Assert.assertEquals("NewCompany", fields.get(1).getFieldName());
+    Assert.assertEquals("OldCompany", fields.get(2).getFieldName());
+    Assert.assertEquals("ProfileChangeTimestamp", fields.get(3).getFieldName());
+    Assert.assertEquals("STRING", fields.get(0).getFieldSchema().getFieldType().toString());
+    Assert.assertEquals("STRING", fields.get(1).getFieldSchema().getFieldType().toString());
+    Assert.assertEquals("STRING", fields.get(2).getFieldSchema().getFieldType().toString());
+    Assert.assertEquals("INT64", fields.get(3).getFieldSchema().getFieldType().toString());
+  }
 
-    // Generate result schema needs to be fixed. SAMZA-2079
-    @Ignore
-    @Test
-    public void testGenerateResultSchema() {
-        prepareEnvironmentVariable();
-        Map<String, String> mapConf = m_executor.fetchSamzaSqlConfig(1);
-        SqlSchema ts = m_executor.generateResultSchema(new MapConfig(mapConf));
+  // Generate result schema needs to be fixed. SAMZA-2079
+  @Ignore
+  @Test
+  public void testGenerateResultSchema() {
+    prepareEnvironmentVariable();
+    Map<String, String> mapConf = mExecutor.fetchSamzaSqlConfig(1);
+    SqlSchema ts = mExecutor.generateResultSchema(new MapConfig(mapConf));
 
-        List<SqlSchema.SqlField> fields = ts.getFields();
-        Assert.assertEquals("__key__", fields.get(0).getFieldName());
-        Assert.assertEquals("Name", fields.get(1).getFieldName());
-        Assert.assertEquals("NewCompany", fields.get(2).getFieldName());
-        Assert.assertEquals("OldCompany", fields.get(3).getFieldName());
-        Assert.assertEquals("ProfileChangeTimestamp", fields.get(4).getFieldName());
-        Assert.assertEquals("ANY", fields.get(0).getFieldSchema().getFieldType().toString());
-        Assert.assertEquals("VARCHAR", fields.get(1).getFieldSchema().getFieldType().toString());
-        Assert.assertEquals("VARCHAR", fields.get(2).getFieldSchema().getFieldType().toString());
-        Assert.assertEquals("VARCHAR", fields.get(3).getFieldSchema().getFieldType().toString());
-        Assert.assertEquals("BIGINT", fields.get(4).getFieldSchema().getFieldType().toString());
-    }
+    List<SqlSchema.SqlField> fields = ts.getFields();
+    Assert.assertEquals("__key__", fields.get(0).getFieldName());
+    Assert.assertEquals("Name", fields.get(1).getFieldName());
+    Assert.assertEquals("NewCompany", fields.get(2).getFieldName());
+    Assert.assertEquals("OldCompany", fields.get(3).getFieldName());
+    Assert.assertEquals("ProfileChangeTimestamp", fields.get(4).getFieldName());
+    Assert.assertEquals("ANY", fields.get(0).getFieldSchema().getFieldType().toString());
+    Assert.assertEquals("VARCHAR", fields.get(1).getFieldSchema().getFieldType().toString());
+    Assert.assertEquals("VARCHAR", fields.get(2).getFieldSchema().getFieldType().toString());
+    Assert.assertEquals("VARCHAR", fields.get(3).getFieldSchema().getFieldType().toString());
+    Assert.assertEquals("BIGINT", fields.get(4).getFieldSchema().getFieldType().toString());
+  }
 
-    private void prepareEnvironmentVariable() {
-        ClassLoader classLoader = getClass().getClassLoader();
-        File file = new File(classLoader.getResource("ProfileChangeStream.avsc").getFile());
-        EnvironmentVariableHandler handler = m_executor.getEnvironmentVariableHandler();
-        handler.setEnvironmentVariable("samza.sql.relSchemaProvider.config.schemaDir", file.getParent());
-        handler.setEnvironmentVariable(CFG_SQL_STMT, "insert into log.outputStream select * from kafka.ProfileChangeStream");
-    }
+  private void prepareEnvironmentVariable() {
+    ClassLoader classLoader = getClass().getClassLoader();
+    File file = new File(classLoader.getResource("ProfileChangeStream.avsc").getFile());
+    EnvironmentVariableHandler handler = mExecutor.getEnvironmentVariableHandler();
+    handler.setEnvironmentVariable("samza.sql.relSchemaProvider.config.schemaDir", file.getParent());
+    handler.setEnvironmentVariable(CFG_SQL_STMT,
+        "insert into log.outputStream select * from kafka.ProfileChangeStream");
+  }
 }
diff --git a/samza-sql-shell/src/test/java/org/apache/samza/sql/client/util/RandomAccessQueueTest.java b/samza-sql-shell/src/test/java/org/apache/samza/sql/client/util/RandomAccessQueueTest.java
index fe7a19a..3048d7d 100644
--- a/samza-sql-shell/src/test/java/org/apache/samza/sql/client/util/RandomAccessQueueTest.java
+++ b/samza-sql-shell/src/test/java/org/apache/samza/sql/client/util/RandomAccessQueueTest.java
@@ -25,50 +25,50 @@
 
 
 public class RandomAccessQueueTest {
-  private RandomAccessQueue m_queue;
+  private RandomAccessQueue mQueue;
   public RandomAccessQueueTest() {
-    m_queue = new RandomAccessQueue<>(Integer.class, 5);
+    mQueue = new RandomAccessQueue<>(Integer.class, 5);
   }
 
   @Test
   public void testAddAndGetElement() {
-    m_queue.clear();
+    mQueue.clear();
     for (int i = 0; i < 4; i++) {
-      m_queue.add(i);
+      mQueue.add(i);
     }
-    Assert.assertEquals(0, m_queue.getHead());
-    Assert.assertEquals(4, m_queue.getSize());
-    Assert.assertEquals(0, m_queue.get(0));
-    Assert.assertEquals(3, m_queue.get(3));
+    Assert.assertEquals(0, mQueue.getHead());
+    Assert.assertEquals(4, mQueue.getSize());
+    Assert.assertEquals(0, mQueue.get(0));
+    Assert.assertEquals(3, mQueue.get(3));
 
     for (int i = 0; i < 3; i++) {
-      m_queue.add(4 + i);
+      mQueue.add(4 + i);
     }
-    int head = m_queue.getHead();
+    int head = mQueue.getHead();
     Assert.assertEquals(2, head);
-    Assert.assertEquals(5, m_queue.getSize());
-    Assert.assertEquals(2, m_queue.get(0));
-    Assert.assertEquals(3, m_queue.get(1));
-    Assert.assertEquals(4, m_queue.get(2));
-    Assert.assertEquals(5, m_queue.get(3));
-    Assert.assertEquals(6, m_queue.get(4));
+    Assert.assertEquals(5, mQueue.getSize());
+    Assert.assertEquals(2, mQueue.get(0));
+    Assert.assertEquals(3, mQueue.get(1));
+    Assert.assertEquals(4, mQueue.get(2));
+    Assert.assertEquals(5, mQueue.get(3));
+    Assert.assertEquals(6, mQueue.get(4));
   }
 
   @Test
   public void testGetRange() {
-    m_queue.clear();
+    mQueue.clear();
     for (int i = 0; i < 4; i++) {
-      m_queue.add(i); // 0, 1, 2, 3
+      mQueue.add(i); // 0, 1, 2, 3
     }
-    List<Integer> rets = m_queue.get(-1, 9);
+    List<Integer> rets = mQueue.get(-1, 9);
     Assert.assertEquals(4, rets.size());
-    Assert.assertEquals(0, m_queue.get(0));
-    Assert.assertEquals(3, m_queue.get(3));
+    Assert.assertEquals(0, mQueue.get(0));
+    Assert.assertEquals(3, mQueue.get(3));
 
     for (int i = 0; i <= 2; i++) {
-      m_queue.add(4 + i);
+      mQueue.add(4 + i);
     }
-    rets = m_queue.get(0, 4);
+    rets = mQueue.get(0, 4);
     Assert.assertEquals(2, rets.get(0).intValue());
     Assert.assertEquals(3, rets.get(1).intValue());
     Assert.assertEquals(4, rets.get(2).intValue());
@@ -78,12 +78,12 @@
 
   @Test
   public void testConsume() {
-    m_queue.clear();
+    mQueue.clear();
     for (int i = 0; i < 4; i++) {
-      m_queue.add(i); // 0, 1, 2, 3
+      mQueue.add(i); // 0, 1, 2, 3
     }
-    List<Integer> rets = m_queue.consume(1, 2);
-    Assert.assertEquals(1, m_queue.getSize());
-    Assert.assertEquals(3, m_queue.getHead());
+    List<Integer> rets = mQueue.consume(1, 2);
+    Assert.assertEquals(1, mQueue.getSize());
+    Assert.assertEquals(3, mQueue.getHead());
   }
 }
\ No newline at end of file
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/SamzaSqlInputMessage.java b/samza-sql/src/main/java/org/apache/samza/sql/SamzaSqlInputMessage.java
index 7ce0339..5142ead 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/SamzaSqlInputMessage.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/SamzaSqlInputMessage.java
@@ -47,7 +47,7 @@
    * @param metadata metadata of the message
    * @return new object of SamzaSqlInputMessage type
    */
-  public static SamzaSqlInputMessage of (KV<Object, Object> keyAndMessageKV, SamzaSqlRelMsgMetadata metadata) {
+  public static SamzaSqlInputMessage of(KV<Object, Object> keyAndMessageKV, SamzaSqlRelMsgMetadata metadata) {
     return new SamzaSqlInputMessage(keyAndMessageKV, metadata);
   }
 
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/avro/AvroRelConverter.java b/samza-sql/src/main/java/org/apache/samza/sql/avro/AvroRelConverter.java
index d70497e..c4138ea 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/avro/AvroRelConverter.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/avro/AvroRelConverter.java
@@ -125,11 +125,18 @@
         .collect(Collectors.toList()));
   }
 
-  private static SamzaSqlRelRecord convertToRelRecord(IndexedRecord avroRecord, Schema schema) {
+  private static SamzaSqlRelRecord convertToRelRecord(IndexedRecord avroRecord) {
     List<Object> fieldValues = new ArrayList<>();
     List<String> fieldNames = new ArrayList<>();
     if (avroRecord != null) {
-      fetchFieldNamesAndValuesFromIndexedRecord(avroRecord, fieldNames, fieldValues, schema);
+      fieldNames.addAll(
+          avroRecord.getSchema().getFields().stream().map(Schema.Field::name).collect(Collectors.toList()));
+      fieldValues.addAll(avroRecord.getSchema()
+          .getFields()
+          .stream()
+          .map(f -> convertToJavaObject(avroRecord.get(avroRecord.getSchema().getField(f.name()).pos()),
+              getNonNullUnionSchema(avroRecord.getSchema().getField(f.name()).schema())))
+          .collect(Collectors.toList()));
     } else {
       String msg = "Avro Record is null";
       LOG.error(msg);
@@ -194,10 +201,12 @@
             .collect(Collectors.toList());
         return avroList;
       case MAP:
-        return ((Map<String, ?>) relObj).entrySet()
-            .stream()
-            .collect(Collectors.toMap(Map.Entry::getKey,
-                e -> convertToAvroObject(e.getValue(), getNonNullUnionSchema(schema).getValueType())));
+        // Keys are typed as Object rather than String because Avro may wrap strings in org.apache.avro.util.Utf8.
+        // TODO: Utf8 is not immutable, so using it as a map key is asking for trouble.
+        final Map<Object, Object> outputMap = new HashMap<>();
+        ((Map<Object, Object>) relObj).forEach((key, aValue) -> outputMap.put(key,
+            convertToAvroObject(aValue, getNonNullUnionSchema(schema).getValueType())));
+        return outputMap;
       case UNION:
         for (Schema unionSchema : schema.getTypes()) {
           if (isSchemaCompatibleWithRelObj(relObj, unionSchema)) {
@@ -224,7 +233,7 @@
     }
     switch (schema.getType()) {
       case RECORD:
-        return convertToRelRecord((IndexedRecord) avroObj, schema);
+        return convertToRelRecord((IndexedRecord) avroObj);
       case ARRAY: {
         ArrayList<Object> retVal = new ArrayList<>();
         List<Object> avroArray;
@@ -246,7 +255,7 @@
         retVal.putAll(((Map<String, ?>) avroObj).entrySet()
             .stream()
             .collect(Collectors.toMap(Map.Entry::getKey,
-                e -> convertToJavaObject(e.getValue(), getNonNullUnionSchema(schema).getValueType()))));
+              e -> convertToJavaObject(e.getValue(), getNonNullUnionSchema(schema).getValueType()))));
         return retVal;
       }
       case UNION:
@@ -321,7 +330,7 @@
       if (types.size() == 2) {
         if (types.get(0).getType() == Schema.Type.NULL) {
           return types.get(1);
-        } else if ((types.get(1).getType() == Schema.Type.NULL)) {
+        } else if (types.get(1).getType() == Schema.Type.NULL) {
           return types.get(0);
         }
       }
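
For context on the Utf8 comment above, a small sketch (assuming the usual Avro GenericData behavior, where strings read through a GenericDatumReader, including map keys, come back as org.apache.avro.util.Utf8 rather than java.lang.String):

  import java.util.Map;
  import org.apache.avro.generic.GenericRecord;
  import org.apache.avro.util.Utf8;

  // Hypothetical illustration: a map field read back from an Avro GenericRecord.
  @SuppressWarnings("unchecked")
  static void inspectMapKeys(GenericRecord record, String mapFieldName) {
    Map<Object, Object> mapField = (Map<Object, Object>) record.get(mapFieldName);
    for (Object key : mapField.keySet()) {
      // Keys may be Utf8 instances, so keying the converted map by String would be lossy or surprising.
      System.out.println(key + " is Utf8: " + (key instanceof Utf8));
    }
  }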
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/avro/AvroRelConverterFactory.java b/samza-sql/src/main/java/org/apache/samza/sql/avro/AvroRelConverterFactory.java
index 278735f..da1e4ef 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/avro/AvroRelConverterFactory.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/avro/AvroRelConverterFactory.java
@@ -20,7 +20,6 @@
 package org.apache.samza.sql.avro;
 
 import java.util.HashMap;
-import java.util.Properties;
 import org.apache.samza.config.Config;
 import org.apache.samza.sql.interfaces.RelSchemaProvider;
 import org.apache.samza.sql.interfaces.SamzaRelConverter;
@@ -39,6 +38,6 @@
   @Override
   public SamzaRelConverter create(SystemStream systemStream, RelSchemaProvider schemaProvider, Config config) {
     return relConverters.computeIfAbsent(systemStream,
-        ss -> new AvroRelConverter(ss, (AvroRelSchemaProvider) schemaProvider, config));
+      ss -> new AvroRelConverter(ss, (AvroRelSchemaProvider) schemaProvider, config));
   }
 }
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/avro/AvroTypeFactoryImpl.java b/samza-sql/src/main/java/org/apache/samza/sql/avro/AvroTypeFactoryImpl.java
index b98d25b..d837e03 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/avro/AvroTypeFactoryImpl.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/avro/AvroTypeFactoryImpl.java
@@ -148,11 +148,13 @@
   }
 
   private SqlFieldSchema getSqlTypeFromUnionTypes(List<Schema> types, boolean isNullable, boolean isOptional) {
-    // Typically a nullable field's schema is configured as an union of Null and a Type.
-    if (types.size() == 2) {
+    if (types.size() == 1) {
+      return convertField(types.get(0), true, true);
+    } else if (types.size() == 2) {
+      // Typically a nullable field's schema is configured as a union of Null and a Type.
       if (types.get(0).getType() == Schema.Type.NULL) {
         return convertField(types.get(1), true, true);
-      } else if ((types.get(1).getType() == Schema.Type.NULL)) {
+      } else if (types.get(1).getType() == Schema.Type.NULL) {
         return convertField(types.get(0), true, true);
       }
     } else if (types.size() > 2) {
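
To make the nullable-union convention above concrete, a short sketch using the standard Avro Schema API (the schema literal is hypothetical):

  import org.apache.avro.Schema;

  // A nullable field is typically declared as a union of NULL and one concrete type.
  Schema nullable = new Schema.Parser().parse("[\"null\", \"string\"]");
  // nullable.getTypes() has size 2 with types.get(0) of type NULL, so getSqlTypeFromUnionTypes()
  // converts the non-null member (types.get(1)) with isNullable = true.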
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/data/RexToJavaCompiler.java b/samza-sql/src/main/java/org/apache/samza/sql/data/RexToJavaCompiler.java
index bc881d3..098d1a3 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/data/RexToJavaCompiler.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/data/RexToJavaCompiler.java
@@ -46,7 +46,6 @@
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.rex.RexProgram;
 import org.apache.calcite.rex.RexProgramBuilder;
-import org.apache.calcite.sql.validate.SqlConformance;
 import org.apache.calcite.sql.validate.SqlConformanceEnum;
 import org.apache.calcite.util.Pair;
 import org.apache.samza.SamzaException;
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/data/SamzaSqlExecutionContext.java b/samza-sql/src/main/java/org/apache/samza/sql/data/SamzaSqlExecutionContext.java
index 1e21365..d7690bb 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/data/SamzaSqlExecutionContext.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/data/SamzaSqlExecutionContext.java
@@ -24,7 +24,6 @@
 import java.util.List;
 import java.util.Map;
 
-import org.apache.samza.SamzaException;
 import org.apache.samza.config.Config;
 import org.apache.samza.context.Context;
 import org.apache.samza.sql.interfaces.UdfMetadata;
@@ -57,7 +56,7 @@
   public SamzaSqlExecutionContext(SamzaSqlApplicationConfig config) {
     this.sqlConfig = config;
     udfMetadata = new HashMap<>();
-    for(UdfMetadata udf : this.sqlConfig.getUdfMetadata()) {
+    for (UdfMetadata udf : this.sqlConfig.getUdfMetadata()) {
       udfMetadata.putIfAbsent(udf.getName(), new ArrayList<>());
       udfMetadata.get(udf.getName()).add(udf);
     }
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/data/SamzaSqlRelMessage.java b/samza-sql/src/main/java/org/apache/samza/sql/data/SamzaSqlRelMessage.java
index 791d79c..2baa8af 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/data/SamzaSqlRelMessage.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/data/SamzaSqlRelMessage.java
@@ -23,7 +23,7 @@
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Objects;
-import org.apache.commons.lang.Validate;
+import org.apache.commons.lang3.Validate;
 import org.apache.samza.sql.SamzaSqlRelRecord;
 import org.codehaus.jackson.annotate.JsonProperty;
 
@@ -124,7 +124,9 @@
   }
 
   @JsonProperty("samzaSqlRelMsgMetadata")
-  public SamzaSqlRelMsgMetadata getSamzaSqlRelMsgMetadata() { return samzaSqlRelMsgMetadata; }
+  public SamzaSqlRelMsgMetadata getSamzaSqlRelMsgMetadata() {
+    return samzaSqlRelMsgMetadata;
+  }
 
   public Object getKey() {
     return key;
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/data/SamzaSqlRelMsgMetadata.java b/samza-sql/src/main/java/org/apache/samza/sql/data/SamzaSqlRelMsgMetadata.java
index e4f0b3b..ee82649 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/data/SamzaSqlRelMsgMetadata.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/data/SamzaSqlRelMsgMetadata.java
@@ -101,36 +101,50 @@
   }
 
 
-      @JsonProperty("eventTime")
-  public long getEventTime() { return eventTime;}
+  @JsonProperty("eventTime")
+  public long getEventTime() {
+    return eventTime;
+  }
 
   public void setEventTime(long eventTime) {
     this.eventTime = eventTime;
   }
 
-  public boolean hasEventTime() { return eventTime != 0L; }
+  public boolean hasEventTime() {
+    return eventTime != 0L;
+  }
 
   @JsonProperty("arrivalTime")
-  public long getArrivalTime() { return arrivalTime;}
+  public long getArrivalTime() {
+    return arrivalTime;
+  }
 
   public void setArrivalTime(long arrivalTime) {
     this.arrivalTime = arrivalTime;
   }
 
-  public boolean hasArrivalTime() { return arrivalTime != 0L; }
+  public boolean hasArrivalTime() {
+    return arrivalTime != 0L;
+  }
 
   @JsonProperty("scanTime")
-  public long getScanTimeNanos() { return scanTimeNanos;}
+  public long getScanTimeNanos() {
+    return scanTimeNanos;
+  }
 
   @JsonProperty("scanTimeMillis")
-  public long getScanTimeMillis() { return scanTimeMillis;}
+  public long getScanTimeMillis() {
+    return scanTimeMillis;
+  }
 
   public void setScanTime(long scanTimeNano, long scanTimeMillis) {
     this.scanTimeNanos = scanTimeNano;
     this.scanTimeMillis = scanTimeMillis;
   }
 
-  public boolean hasScanTime() { return scanTimeNanos != 0L; }
+  public boolean hasScanTime() {
+    return scanTimeNanos != 0L;
+  }
 
   @JsonIgnore
   public  void setIsSystemMessage(boolean isSystemMessage) {
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/dsl/SamzaSqlDslConverter.java b/samza-sql/src/main/java/org/apache/samza/sql/dsl/SamzaSqlDslConverter.java
index b09d3d6..acc5b42 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/dsl/SamzaSqlDslConverter.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/dsl/SamzaSqlDslConverter.java
@@ -52,9 +52,9 @@
   public Collection<RelRoot> convertDsl(String dsl) {
     // TODO: Introduce an API to parse a dsl string and return one or more sql statements
     List<String> sqlStmts = fetchSqlFromConfig(config);
-    QueryPlanner planner = getQueryPlanner(getSqlConfig(sqlStmts, config));
     List<RelRoot> relRoots = new LinkedList<>();
     for (String sql: sqlStmts) {
+      QueryPlanner planner = getQueryPlanner(getSqlConfig(Collections.singletonList(sql), config));
       // we always pass only select query to the planner for samza sql. The reason is that samza sql supports
       // schema evolution where source and destination could up to an extent have independent schema evolution while
       // calcite expects strict conformance of the destination schema with that of the fields in the select query.
@@ -87,7 +87,7 @@
    */
   public static QueryPlanner getQueryPlanner(SamzaSqlApplicationConfig sqlConfig) {
     return new QueryPlanner(sqlConfig.getRelSchemaProviders(), sqlConfig.getInputSystemStreamConfigBySource(),
-        sqlConfig.getUdfMetadata());
+        sqlConfig.getUdfMetadata(), sqlConfig.isQueryPlanOptimizerEnabled());
   }
 
   /**
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/fn/BuildOutputRecordUdf.java b/samza-sql/src/main/java/org/apache/samza/sql/fn/BuildOutputRecordUdf.java
index 7f9de1a..3c427cc 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/fn/BuildOutputRecordUdf.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/fn/BuildOutputRecordUdf.java
@@ -21,7 +21,7 @@
 
 import java.util.ArrayList;
 import java.util.List;
-import org.apache.commons.lang.Validate;
+import org.apache.commons.lang3.Validate;
 import org.apache.samza.config.Config;
 import org.apache.samza.context.Context;
 import org.apache.samza.sql.SamzaSqlRelRecord;
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/fn/GetNestedFieldUdf.java b/samza-sql/src/main/java/org/apache/samza/sql/fn/GetNestedFieldUdf.java
deleted file mode 100644
index 4ef2a11..0000000
--- a/samza-sql/src/main/java/org/apache/samza/sql/fn/GetNestedFieldUdf.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*/
-
-package org.apache.samza.sql.fn;
-
-import org.apache.samza.config.Config;
-import org.apache.samza.context.Context;
-import org.apache.samza.sql.schema.SamzaSqlFieldType;
-import org.apache.samza.sql.udfs.SamzaSqlUdf;
-import org.apache.samza.sql.udfs.SamzaSqlUdfMethod;
-import org.apache.samza.sql.udfs.ScalarUdf;
-
-
-@SamzaSqlUdf(name = "GetNestedField", description = "UDF that extracts a field value from a nested SamzaSqlRelRecord")
-public class GetNestedFieldUdf implements ScalarUdf {
-  @Override
-  public void init(Config udfConfig, Context context) {
-  }
-
-  @SamzaSqlUdfMethod(params = {SamzaSqlFieldType.ANY, SamzaSqlFieldType.STRING},
-      returns = SamzaSqlFieldType.ANY)
-  public Object execute(Object currentFieldOrValue, String fieldName) {
-    GetSqlFieldUdf udf = new GetSqlFieldUdf();
-    return udf.getSqlField(currentFieldOrValue, fieldName);
-  }
-}
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/fn/GetSqlFieldUdf.java b/samza-sql/src/main/java/org/apache/samza/sql/fn/GetSqlFieldUdf.java
index ec05d55..6072a80 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/fn/GetSqlFieldUdf.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/fn/GetSqlFieldUdf.java
@@ -73,7 +73,7 @@
 
   public Object getSqlField(Object currentFieldOrValue, String fieldName) {
     if (currentFieldOrValue != null) {
-      String[] fieldNameChain = (fieldName).split("\\.");
+      String[] fieldNameChain = fieldName.split("\\.");
       for (int i = 0; i < fieldNameChain.length && currentFieldOrValue != null; i++) {
         currentFieldOrValue = extractField(fieldNameChain[i], currentFieldOrValue, true);
       }
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/fn/RegexMatchUdf.java b/samza-sql/src/main/java/org/apache/samza/sql/fn/RegexMatchUdf.java
index 4ae7a80..c946aa0 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/fn/RegexMatchUdf.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/fn/RegexMatchUdf.java
@@ -31,7 +31,7 @@
 /**
  * Simple RegexMatch Udf.
  */
-@SamzaSqlUdf(name="RegexMatch", description = "Function to perform the regex match.")
+@SamzaSqlUdf(name = "RegexMatch", description = "Function to perform the regex match.")
 public class RegexMatchUdf implements ScalarUdf {
   @Override
   public void init(Config config, Context context) {
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/impl/ConfigBasedIOResolverFactory.java b/samza-sql/src/main/java/org/apache/samza/sql/impl/ConfigBasedIOResolverFactory.java
index 89a32d8..dd58201 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/impl/ConfigBasedIOResolverFactory.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/impl/ConfigBasedIOResolverFactory.java
@@ -54,7 +54,7 @@
   }
 
   private class ConfigBasedIOResolver implements SqlIOResolver {
-    private final String SAMZA_SQL_QUERY_TABLE_KEYWORD = "$table";
+    private static final String SAMZA_SQL_QUERY_TABLE_KEYWORD = "$table";
     private final Config config;
     private final String changeLogStorePrefix;
 
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/impl/ConfigBasedUdfResolver.java b/samza-sql/src/main/java/org/apache/samza/sql/impl/ConfigBasedUdfResolver.java
index 2b83b60..2b9381d 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/impl/ConfigBasedUdfResolver.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/impl/ConfigBasedUdfResolver.java
@@ -28,7 +28,7 @@
 import java.util.Map;
 import java.util.Properties;
 import java.util.stream.Collectors;
-import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.samza.SamzaException;
 import org.apache.samza.config.Config;
 import org.apache.samza.sql.interfaces.UdfMetadata;
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/interfaces/SamzaSqlDriver.java b/samza-sql/src/main/java/org/apache/samza/sql/interfaces/SamzaSqlDriver.java
index 5c86df9..ef83a95 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/interfaces/SamzaSqlDriver.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/interfaces/SamzaSqlDriver.java
@@ -18,12 +18,10 @@
 
 import java.sql.Connection;
 import java.sql.SQLException;
-import java.util.List;
 import java.util.Properties;
 import org.apache.calcite.adapter.java.JavaTypeFactory;
 import org.apache.calcite.avatica.AvaticaConnection;
 import org.apache.calcite.avatica.ConnectStringParser;
-import org.apache.calcite.avatica.Meta;
 import org.apache.calcite.jdbc.CalciteFactory;
 import org.apache.calcite.jdbc.Driver;
 
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/interfaces/SqlIOConfig.java b/samza-sql/src/main/java/org/apache/samza/sql/interfaces/SqlIOConfig.java
index 4350889..9dc4c1e 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/interfaces/SqlIOConfig.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/interfaces/SqlIOConfig.java
@@ -25,7 +25,7 @@
 import java.util.List;
 import java.util.Optional;
 
-import org.apache.commons.lang.Validate;
+import org.apache.commons.lang3.Validate;
 import org.apache.samza.config.Config;
 import org.apache.samza.config.MapConfig;
 import org.apache.samza.config.StreamConfig;
@@ -81,11 +81,13 @@
     this.streamId = String.format("%s-%s", systemName, streamName);
 
     samzaRelConverterName = streamConfigs.get(CFG_SAMZA_REL_CONVERTER);
-    Validate.notEmpty(samzaRelConverterName, String.format("System %s is not supported. Please check if the system name is provided correctly.", systemName));
+    Validate.notEmpty(samzaRelConverterName, String.format("System %s is not supported. "
+        + "Please check if the system name is provided correctly.", systemName));
 
     if (isRemoteTable()) {
       samzaRelTableKeyConverterName = streamConfigs.get(CFG_SAMZA_REL_TABLE_KEY_CONVERTER);
-      Validate.notEmpty(samzaRelTableKeyConverterName, String.format("System %s is not supported. Please check if the system name is provided correctly.", systemName));
+      Validate.notEmpty(samzaRelTableKeyConverterName, String.format("System %s is not supported. "
+          + "Please check if the system name is provided correctly.", systemName));
     } else {
       samzaRelTableKeyConverterName = "";
     }
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/interfaces/UdfMetadata.java b/samza-sql/src/main/java/org/apache/samza/sql/interfaces/UdfMetadata.java
index e3c5d60..9e15b21 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/interfaces/UdfMetadata.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/interfaces/UdfMetadata.java
@@ -35,7 +35,6 @@
   // retains the name as it is given to UdfMetadata.
   // For example: if displayName is 'GetSqlField', name would be 'GETSQLFIELD'.
   private final String name;
-  private final String displayName;
 
   private final String description;
   private final Method udfMethod;
@@ -47,10 +46,7 @@
 
   public UdfMetadata(String name, String description, Method udfMethod, Config udfConfig, List<SamzaSqlFieldType> arguments,
       SamzaSqlFieldType returnType, boolean disableArgCheck) {
-    // Udfs are case insensitive
-    this.name = name.toUpperCase();
-    // Let's also store the original name for display purposes.
-    this.displayName = name;
+    this.name = name;
     this.description = description;
     this.udfMethod = udfMethod;
     this.udfConfig = udfConfig;
@@ -88,7 +84,7 @@
    * @return Returns the name of the Udf for display purposes.
    */
   public String getDisplayName() {
-    return displayName;
+    return getName();
   }
 
   /**
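
The uppercase normalization removed from UdfMetadata above is now handled by case-insensitive name resolution in the planner; a minimal sketch of the parser configuration that carries that behavior (mirroring the QueryPlanner change later in this patch):

  import org.apache.calcite.config.Lex;
  import org.apache.calcite.sql.parser.SqlParser;
  import org.apache.calcite.sql.validate.SqlConformanceEnum;

  SqlParser.Config parserConfig = SqlParser.configBuilder()
      .setLex(Lex.JAVA)
      .setConformance(SqlConformanceEnum.LENIENT)
      .setCaseSensitive(false)  // GetSqlField() and GETSQLFIELD() now resolve to the same UDF
      .build();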
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/planner/Checker.java b/samza-sql/src/main/java/org/apache/samza/sql/planner/Checker.java
index 8504be1..60794ef 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/planner/Checker.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/planner/Checker.java
@@ -81,17 +81,15 @@
     } else {
       // 1. Generate a mapping from argument index to parsed calcite-type for the sql UDF.
       Map<Integer, RelDataType> argumentIndexToCalciteType = IntStream.range(0, callBinding.getOperandCount())
-              .boxed()
-              .collect(Collectors.toMap(
-                      operandIndex -> operandIndex,
-                      callBinding::getOperandType, (a, b) -> b));
+          .boxed()
+          .collect(Collectors.toMap(operandIndex -> operandIndex, callBinding::getOperandType, (a, b) -> b));
 
       UdfMetadata udfMetadata = udfMetadataOptional.get();
       List<SamzaSqlFieldType> udfArguments = udfMetadata.getArguments();
 
       // 2. Compare the argument type in samza-sql UDF against the RelType generated by the
       // calcite parser engine.
-      for (int udfArgumentIndex = 0; udfArgumentIndex < udfArguments.size(); ++ udfArgumentIndex) {
+      for (int udfArgumentIndex = 0; udfArgumentIndex < udfArguments.size(); ++udfArgumentIndex) {
         SamzaSqlFieldType udfArgumentType = udfArguments.get(udfArgumentIndex);
         SqlTypeName udfArgumentAsSqlType = toCalciteSqlType(udfArgumentType);
         RelDataType parsedSqlArgType = argumentIndexToCalciteType.get(udfArgumentIndex);
@@ -100,11 +98,11 @@
         if (parsedSqlArgType.getSqlTypeName() == SqlTypeName.CHAR && udfArgumentAsSqlType == SqlTypeName.VARCHAR) {
           return true;
         } else if (!Objects.equals(parsedSqlArgType.getSqlTypeName(), udfArgumentAsSqlType)
-                && !ANY_SQL_TYPE_NAMES.contains(parsedSqlArgType.getSqlTypeName()) && hasOneUdfMethod(udfMetadata)) {
+            && !ANY_SQL_TYPE_NAMES.contains(parsedSqlArgType.getSqlTypeName()) && hasOneUdfMethod(udfMetadata)) {
           // 3(b). Throw up and fail on mismatch between the SamzaSqlType and CalciteType for any argument.
-          String msg = String.format("Type mismatch in udf class: %s at argument index: %d." +
-                          "Expected type: %s, actual type: %s.", udfMetadata.getName(),
-                  udfArgumentIndex, parsedSqlArgType.getSqlTypeName(), udfArgumentAsSqlType);
+          String msg = String.format(
+              "Type mismatch in udf class: %s at argument index: %d." + "Expected type: %s, actual type: %s.",
+              udfMetadata.getName(), udfArgumentIndex, udfArgumentAsSqlType, parsedSqlArgType.getSqlTypeName());
           LOG.error(msg);
           throw new SamzaSqlValidatorException(msg);
         }
@@ -161,8 +159,9 @@
   static SqlTypeName toCalciteSqlType(SamzaSqlFieldType samzaSqlFieldType) {
     switch (samzaSqlFieldType) {
       case ANY:
-      case ROW:
         return SqlTypeName.ANY;
+      case ROW:
+        return SqlTypeName.ROW;
       case MAP:
         return SqlTypeName.MAP;
       case ARRAY:
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/planner/QueryPlanner.java b/samza-sql/src/main/java/org/apache/samza/sql/planner/QueryPlanner.java
index c09777d..dc37753 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/planner/QueryPlanner.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/planner/QueryPlanner.java
@@ -19,21 +19,25 @@
 
 package org.apache.samza.sql.planner;
 
-import java.sql.Connection;
-import java.sql.DriverManager;
+import com.google.common.collect.ImmutableList;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
 import org.apache.calcite.config.Lex;
-import org.apache.calcite.jdbc.CalciteConnection;
-import org.apache.calcite.plan.Contexts;
+import org.apache.calcite.jdbc.CalciteSchema;
 import org.apache.calcite.plan.ConventionTraitDef;
+import org.apache.calcite.plan.RelOptRule;
 import org.apache.calcite.plan.RelOptUtil;
 import org.apache.calcite.plan.RelTraitDef;
+import org.apache.calcite.plan.RelTraitSet;
 import org.apache.calcite.rel.RelCollationTraitDef;
 import org.apache.calcite.rel.RelRoot;
+import org.apache.calcite.rel.core.RelFactories;
+import org.apache.calcite.rel.metadata.DefaultRelMetadataProvider;
+import org.apache.calcite.rel.rules.FilterProjectTransposeRule;
+import org.apache.calcite.rel.rules.ProjectMergeRule;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeFactory;
 import org.apache.calcite.schema.SchemaPlus;
@@ -41,6 +45,7 @@
 import org.apache.calcite.schema.impl.AbstractSchema;
 import org.apache.calcite.schema.impl.AbstractTable;
 import org.apache.calcite.sql.SqlExplainLevel;
+import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.SqlNode;
 import org.apache.calcite.sql.SqlOperatorTable;
 import org.apache.calcite.sql.parser.SqlParser;
@@ -50,14 +55,15 @@
 import org.apache.calcite.tools.FrameworkConfig;
 import org.apache.calcite.tools.Frameworks;
 import org.apache.calcite.tools.Planner;
+import org.apache.calcite.tools.Programs;
 import org.apache.samza.SamzaException;
 import org.apache.samza.sql.data.SamzaSqlRelMessage;
 import org.apache.samza.sql.interfaces.RelSchemaProvider;
 import org.apache.samza.sql.interfaces.SqlIOConfig;
+import org.apache.samza.sql.interfaces.UdfMetadata;
 import org.apache.samza.sql.schema.SamzaSqlFieldType;
 import org.apache.samza.sql.schema.SqlFieldSchema;
 import org.apache.samza.sql.schema.SqlSchema;
-import org.apache.samza.sql.interfaces.UdfMetadata;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -76,11 +82,15 @@
   // Mapping between the source to the SqlIOConfig corresponding to the source.
   private final Map<String, SqlIOConfig> systemStreamConfigBySource;
 
+  private final boolean isQueryPlanOptimizerEnabled;
+
   public QueryPlanner(Map<String, RelSchemaProvider> relSchemaProviders,
-      Map<String, SqlIOConfig> systemStreamConfigBySource, Collection<UdfMetadata> udfMetadata) {
+      Map<String, SqlIOConfig> systemStreamConfigBySource, Collection<UdfMetadata> udfMetadata,
+      boolean isQueryPlanOptimizerEnabled) {
     this.relSchemaProviders = relSchemaProviders;
     this.systemStreamConfigBySource = systemStreamConfigBySource;
     this.udfMetadata = udfMetadata;
+    this.isQueryPlanOptimizerEnabled = isQueryPlanOptimizerEnabled;
   }
 
   private void registerSourceSchemas(SchemaPlus rootSchema) {
@@ -109,43 +119,73 @@
     }
   }
 
-  public RelRoot plan(String query) {
+  private Planner getPlanner() {
+    Planner planner;
+    SchemaPlus rootSchema = CalciteSchema.createRootSchema(true, false).plus();
+    registerSourceSchemas(rootSchema);
+
+    List<SamzaSqlScalarFunctionImpl> samzaSqlFunctions =
+        udfMetadata.stream().map(SamzaSqlScalarFunctionImpl::new).collect(Collectors.toList());
+
+    final List<RelTraitDef> traitDefs = new ArrayList<>();
+
+    traitDefs.add(ConventionTraitDef.INSTANCE);
+    traitDefs.add(RelCollationTraitDef.INSTANCE);
+
+    List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
+    sqlOperatorTables.add(new SamzaSqlOperatorTable());
+    sqlOperatorTables.add(new SamzaSqlUdfOperatorTable(samzaSqlFunctions));
+
+    // TODO: Introduce a pluggable rule factory.
+    List<RelOptRule> rules = ImmutableList.of(FilterProjectTransposeRule.INSTANCE, ProjectMergeRule.INSTANCE,
+        new SamzaSqlFilterRemoteJoinRule.SamzaSqlFilterIntoRemoteJoinRule(true, RelFactories.LOGICAL_BUILDER,
+            systemStreamConfigBySource));
+
+    // Using LENIENT conformance so that !=, %, and - are allowed.
+    FrameworkConfig frameworkConfig = Frameworks.newConfigBuilder()
+        .parserConfig(SqlParser.configBuilder()
+            .setLex(Lex.JAVA)
+            .setConformance(SqlConformanceEnum.LENIENT)
+            .setCaseSensitive(false) // Make Udfs case insensitive
+            .build())
+        .defaultSchema(rootSchema)
+        .operatorTable(new ChainedSqlOperatorTable(sqlOperatorTables))
+        .sqlToRelConverterConfig(SqlToRelConverter.Config.DEFAULT)
+        .traitDefs(traitDefs)
+        .programs(Programs.hep(rules, true, DefaultRelMetadataProvider.INSTANCE))
+        .build();
+    planner = Frameworks.getPlanner(frameworkConfig);
+    return planner;
+  }
+
+  private RelRoot optimize(Planner planner, RelRoot relRoot) {
+    RelTraitSet relTraitSet = RelTraitSet.createEmpty();
     try {
-      Connection connection = DriverManager.getConnection("jdbc:calcite:");
-      CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class);
-      SchemaPlus rootSchema = calciteConnection.getRootSchema();
-      registerSourceSchemas(rootSchema);
+      RelRoot optimizedRelRoot =
+          RelRoot.of(planner.transform(0, relTraitSet, relRoot.project()), SqlKind.SELECT);
+      LOG.info("query plan with optimization:\n"
+          + RelOptUtil.toString(optimizedRelRoot.rel, SqlExplainLevel.EXPPLAN_ATTRIBUTES));
+      return optimizedRelRoot;
+    } catch (Exception e) {
+      String errorMsg =
+          "Error while optimizing query plan:\n" + RelOptUtil.toString(relRoot.rel, SqlExplainLevel.EXPPLAN_ATTRIBUTES);
+      LOG.error(errorMsg, e);
+      throw new SamzaException(errorMsg, e);
+    }
+  }
 
-      List<SamzaSqlScalarFunctionImpl> samzaSqlFunctions = udfMetadata.stream()
-          .map(x -> new SamzaSqlScalarFunctionImpl(x))
-          .collect(Collectors.toList());
-
-      final List<RelTraitDef> traitDefs = new ArrayList<>();
-
-      traitDefs.add(ConventionTraitDef.INSTANCE);
-      traitDefs.add(RelCollationTraitDef.INSTANCE);
-
-      List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
-      sqlOperatorTables.add(new SamzaSqlOperatorTable());
-      sqlOperatorTables.add(new SamzaSqlUdfOperatorTable(samzaSqlFunctions));
-
-      // Using lenient so that !=,%,- are allowed.
-      FrameworkConfig frameworkConfig = Frameworks.newConfigBuilder()
-          .parserConfig(SqlParser.configBuilder().setLex(Lex.JAVA).setConformance(SqlConformanceEnum.LENIENT).build())
-          .defaultSchema(rootSchema)
-          .operatorTable(new ChainedSqlOperatorTable(sqlOperatorTables))
-          .sqlToRelConverterConfig(SqlToRelConverter.Config.DEFAULT)
-          .traitDefs(traitDefs)
-          .context(Contexts.EMPTY_CONTEXT)
-          .costFactory(null)
-          .build();
-      Planner planner = Frameworks.getPlanner(frameworkConfig);
-
+  public RelRoot plan(String query) {
+    try (Planner planner = getPlanner()) {
       SqlNode sql = planner.parse(query);
       SqlNode validatedSql = planner.validate(sql);
       RelRoot relRoot = planner.rel(validatedSql);
-      LOG.info("query plan:\n" + RelOptUtil.toString(relRoot.rel, SqlExplainLevel.ALL_ATTRIBUTES));
-      return relRoot;
+      LOG.info(
+          "query plan without optimization:\n" + RelOptUtil.toString(relRoot.rel, SqlExplainLevel.EXPPLAN_ATTRIBUTES));
+      if (!isQueryPlanOptimizerEnabled) {
+        LOG.info("Skipping query optimization as it is disabled.");
+        return relRoot;
+      }
+      return optimize(planner, relRoot);
     } catch (Exception e) {
       String errorMsg = SamzaSqlValidator.formatErrorString(query, e);
       LOG.error(errorMsg, e);
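
A minimal usage sketch of the reworked planner with the new optimizer flag; the SamzaSqlApplicationConfig accessors are the ones used by SamzaSqlDslConverter.getQueryPlanner() in this patch, and the query string is illustrative:

  QueryPlanner planner = new QueryPlanner(
      sqlConfig.getRelSchemaProviders(),
      sqlConfig.getInputSystemStreamConfigBySource(),
      sqlConfig.getUdfMetadata(),
      sqlConfig.isQueryPlanOptimizerEnabled());  // false skips the HEP optimization pass
  RelRoot planned = planner.plan("select Name from kafka.ProfileChangeStream");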
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/planner/RelSchemaConverter.java b/samza-sql/src/main/java/org/apache/samza/sql/planner/RelSchemaConverter.java
index c3735a4..fcc289c 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/planner/RelSchemaConverter.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/planner/RelSchemaConverter.java
@@ -21,11 +21,13 @@
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.stream.Collectors;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
 import org.apache.calcite.rel.type.RelDataTypeSystem;
 import org.apache.calcite.rel.type.RelRecordType;
+import org.apache.calcite.rel.type.StructKind;
 import org.apache.calcite.sql.type.ArraySqlType;
 import org.apache.calcite.sql.type.MapSqlType;
 import org.apache.calcite.sql.type.SqlTypeFactoryImpl;
@@ -94,6 +96,11 @@
       case INT64:
         return createTypeWithNullability(createSqlType(SqlTypeName.BIGINT), true);
       case ROW:
+        final RelDataType rowType = convertToRelSchema(fieldSchema.getRowSchema());
+        /* Use fully qualified names to ensure the row is fully reconstructed at the final projection. */
+        return createTypeWithNullability(createStructType(StructKind.FULLY_QUALIFIED,
+            rowType.getFieldList().stream().map(RelDataTypeField::getType).collect(Collectors.toList()),
+            rowType.getFieldNames()), true);
       case ANY:
         // TODO Calcite execution engine doesn't support record type yet.
         return createTypeWithNullability(createSqlType(SqlTypeName.ANY), true);
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/planner/SamzaSqlFilterRemoteJoinRule.java b/samza-sql/src/main/java/org/apache/samza/sql/planner/SamzaSqlFilterRemoteJoinRule.java
new file mode 100644
index 0000000..fc84f65
--- /dev/null
+++ b/samza-sql/src/main/java/org/apache/samza/sql/planner/SamzaSqlFilterRemoteJoinRule.java
@@ -0,0 +1,261 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.samza.sql.planner;
+
+import com.google.common.collect.ImmutableList;
+import java.util.Map;
+import org.apache.calcite.plan.RelOptRule;
+import org.apache.calcite.plan.RelOptRuleCall;
+import org.apache.calcite.plan.RelOptRuleOperand;
+import org.apache.calcite.plan.RelOptUtil;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Filter;
+import org.apache.calcite.rel.core.Join;
+import org.apache.calcite.rel.core.JoinRelType;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexUtil;
+import org.apache.calcite.tools.RelBuilder;
+import org.apache.calcite.tools.RelBuilderFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.samza.sql.interfaces.SqlIOConfig;
+import org.apache.samza.sql.translator.JoinInputNode;
+import org.apache.samza.sql.translator.JoinInputNode.InputType;
+
+/**
+ * Planner rule for remote table joins that pushes filters above and
+ * within a join node into its child nodes.
+ * This class is a customized form of Calcite's {@link org.apache.calcite.rel.rules.FilterJoinRule} for
+ * remote table joins.
+ */
+public abstract class SamzaSqlFilterRemoteJoinRule extends RelOptRule {
+  /** Whether to try to strengthen join-type. */
+  private final boolean smart;
+
+  Map<String, SqlIOConfig> systemStreamConfigBySource;
+
+  //~ Constructors -----------------------------------------------------------
+
+  /**
+   * Creates a SamzaSqlFilterRemoteJoinRule with an explicit root operand and
+   * factories.
+   */
+  protected SamzaSqlFilterRemoteJoinRule(RelOptRuleOperand operand, String id,
+      boolean smart, RelBuilderFactory relBuilderFactory, Map<String, SqlIOConfig> systemStreamConfigBySource) {
+    super(operand, relBuilderFactory, "SamzaSqlFilterRemoteJoinRule:" + id);
+    this.smart = smart;
+    this.systemStreamConfigBySource = systemStreamConfigBySource;
+  }
+
+  //~ Methods ----------------------------------------------------------------
+
+  protected void perform(RelOptRuleCall call, Filter filter,
+      Join join) {
+    final List<RexNode> joinFilters =
+        RelOptUtil.conjunctions(join.getCondition());
+
+    boolean donotOptimizeLeft = false;
+    boolean donotOptimizeRight = false;
+
+    JoinInputNode.InputType inputTypeOnLeft =
+        JoinInputNode.getInputType(join.getLeft(), systemStreamConfigBySource);
+    JoinInputNode.InputType inputTypeOnRight =
+        JoinInputNode.getInputType(join.getRight(), systemStreamConfigBySource);
+
+    // Disable this optimization for queries using local table.
+    if (inputTypeOnLeft == InputType.LOCAL_TABLE || inputTypeOnRight == InputType.LOCAL_TABLE) {
+      donotOptimizeLeft = true;
+      donotOptimizeRight = true;
+    }
+
+    // There is nothing to optimize on the remote table side as the lookup needs to happen first before filtering.
+    if (inputTypeOnLeft == InputType.REMOTE_TABLE) {
+      donotOptimizeLeft = true;
+    }
+    if (inputTypeOnRight == InputType.REMOTE_TABLE) {
+      donotOptimizeRight = true;
+    }
+
+    // If there is only the joinRel,
+    // make sure it does not match a cartesian product joinRel
+    // (with "true" condition), otherwise this rule will be applied
+    // again on the new cartesian product joinRel.
+    if (filter == null && joinFilters.isEmpty()) {
+      return;
+    }
+
+    final List<RexNode> aboveFilters =
+        filter != null
+            ? RelOptUtil.conjunctions(filter.getCondition())
+            : new ArrayList<>();
+    final ImmutableList<RexNode> origAboveFilters =
+        ImmutableList.copyOf(aboveFilters);
+
+    // Simplify Outer Joins
+    JoinRelType joinType = join.getJoinType();
+    if (smart
+        && !origAboveFilters.isEmpty()
+        && join.getJoinType() != JoinRelType.INNER) {
+      joinType = RelOptUtil.simplifyJoin(join, origAboveFilters, joinType);
+    }
+
+    final List<RexNode> leftFilters = new ArrayList<>();
+    final List<RexNode> rightFilters = new ArrayList<>();
+
+    // TODO - add logic to derive additional filters.  E.g., from
+    // (t1.a = 1 AND t2.a = 2) OR (t1.b = 3 AND t2.b = 4), you can
+    // derive table filters:
+    // (t1.a = 1 OR t1.b = 3)
+    // (t2.a = 2 OR t2.b = 4)
+
+    // Try to push down above filters. These are typically where clause
+    // filters. They can be pushed down if they are not on the NULL
+    // generating side.
+    // We do not push into the join condition as we do not benefit much. There is also a correctness issue
+    // with remote tables, as we will not have values for the remote table before the join/lookup.
+    // leftFilters and rightFilters are populated in classifyFilters API.
+    boolean filterPushed = false;
+    if (RelOptUtil.classifyFilters(
+        join,
+        aboveFilters,
+        joinType,
+        false, // Let's not push into join filter
+        !joinType.generatesNullsOnLeft() && !donotOptimizeLeft,
+        !joinType.generatesNullsOnRight() && !donotOptimizeRight,
+        joinFilters,
+        leftFilters,
+        rightFilters)) {
+      filterPushed = true;
+    }
+
+    // If no filter got pushed after validate, reset filterPushed flag
+    if (leftFilters.isEmpty()
+        && rightFilters.isEmpty()) {
+      filterPushed = false;
+    }
+
+    boolean isAntiJoin = joinType == JoinRelType.ANTI;
+
+    // Try to push down filters in the ON clause. An ON clause filter can only be
+    // pushed down if it does not affect the non-matching set, i.e. it is
+    // not on the side which is preserved.
+    // An ON clause filter of an anti-join cannot be pushed down.
+    if (!isAntiJoin && RelOptUtil.classifyFilters(
+        join,
+        joinFilters,
+        joinType,
+        false,
+        !joinType.generatesNullsOnLeft() && !donotOptimizeLeft,
+        !joinType.generatesNullsOnRight() && !donotOptimizeRight,
+        joinFilters,
+        leftFilters,
+        rightFilters)) {
+      filterPushed = true;
+    }
+
+    // if nothing actually got pushed and there is nothing leftover,
+    // then this rule is a no-op
+    if ((!filterPushed
+        && joinType == join.getJoinType())
+        || (joinFilters.isEmpty()
+        && leftFilters.isEmpty()
+        && rightFilters.isEmpty())) {
+      return;
+    }
+
+    // create Filters on top of the children if any filters were
+    // pushed to them
+    final RexBuilder rexBuilder = join.getCluster().getRexBuilder();
+    final RelBuilder relBuilder = call.builder();
+
+    final RelNode leftRel = relBuilder.push(join.getLeft()).filter(leftFilters).build();
+    final RelNode rightRel = relBuilder.push(join.getRight()).filter(rightFilters).build();
+
+    // create the new join node referencing the new children and
+    // containing its new join filters (if there are any)
+    final ImmutableList<RelDataType> fieldTypes =
+        ImmutableList.<RelDataType>builder()
+            .addAll(RelOptUtil.getFieldTypeList(leftRel.getRowType()))
+            .addAll(RelOptUtil.getFieldTypeList(rightRel.getRowType())).build();
+    final RexNode joinFilter =
+        RexUtil.composeConjunction(rexBuilder,
+            RexUtil.fixUp(rexBuilder, joinFilters, fieldTypes));
+
+    // If nothing actually got pushed and there is nothing leftover,
+    // then this rule is a no-op
+    if (joinFilter.isAlwaysTrue()
+        && leftFilters.isEmpty()
+        && rightFilters.isEmpty()
+        && joinType == join.getJoinType()) {
+      return;
+    }
+
+    RelNode newJoinRel =
+        join.copy(
+            join.getTraitSet(),
+            joinFilter,
+            leftRel,
+            rightRel,
+            joinType,
+            join.isSemiJoinDone());
+    call.getPlanner().onCopy(join, newJoinRel);
+    if (!leftFilters.isEmpty()) {
+      call.getPlanner().onCopy(filter, leftRel);
+    }
+    if (!rightFilters.isEmpty()) {
+      call.getPlanner().onCopy(filter, rightRel);
+    }
+
+    relBuilder.push(newJoinRel);
+
+    // Create a project on top of the join if some of the columns have become
+    // NOT NULL due to the join-type getting stricter.
+    relBuilder.convert(join.getRowType(), false);
+
+    // create a FilterRel on top of the join if needed
+    relBuilder.filter(
+        RexUtil.fixUp(rexBuilder, aboveFilters,
+            RelOptUtil.getFieldTypeList(relBuilder.peek().getRowType())));
+
+    call.transformTo(relBuilder.build());
+  }
+
+  /** Rule that tries to push stream-side filter expressions into the corresponding input of the join. */
+  public static class SamzaSqlFilterIntoRemoteJoinRule extends SamzaSqlFilterRemoteJoinRule {
+    public SamzaSqlFilterIntoRemoteJoinRule(boolean smart,
+        RelBuilderFactory relBuilderFactory, Map<String, SqlIOConfig> systemStreamConfigBySource) {
+      super(
+          operand(Filter.class,
+              operand(Join.class, RelOptRule.any())),
+          "SamzaSqlFilterRemoteJoinRule:filter", smart, relBuilderFactory, systemStreamConfigBySource);
+    }
+
+    @Override public void onMatch(RelOptRuleCall call) {
+      Filter filter = call.rel(0);
+      Join join = call.rel(1);
+      perform(call, filter, join);
+    }
+  }
+}
+
+// End SamzaSqlFilterRemoteJoinRule.java
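
For reference, a short sketch of how this rule is wired into the HEP program (matching the registration added to QueryPlanner earlier in this patch; the systemStreamConfigBySource map is assumed to be available), so that stream-side predicates in a WHERE clause are evaluated before the remote-table lookup while remote-table predicates stay above the join:

  import com.google.common.collect.ImmutableList;
  import java.util.List;
  import org.apache.calcite.plan.RelOptRule;
  import org.apache.calcite.rel.core.RelFactories;
  import org.apache.calcite.rel.metadata.DefaultRelMetadataProvider;
  import org.apache.calcite.rel.rules.FilterProjectTransposeRule;
  import org.apache.calcite.rel.rules.ProjectMergeRule;
  import org.apache.calcite.tools.Program;
  import org.apache.calcite.tools.Programs;

  List<RelOptRule> rules = ImmutableList.of(
      FilterProjectTransposeRule.INSTANCE,
      ProjectMergeRule.INSTANCE,
      new SamzaSqlFilterRemoteJoinRule.SamzaSqlFilterIntoRemoteJoinRule(
          true, RelFactories.LOGICAL_BUILDER, systemStreamConfigBySource));
  Program program = Programs.hep(rules, true, DefaultRelMetadataProvider.INSTANCE);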
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/planner/SamzaSqlOperatorTable.java b/samza-sql/src/main/java/org/apache/samza/sql/planner/SamzaSqlOperatorTable.java
index 766925a..6b8e8ba 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/planner/SamzaSqlOperatorTable.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/planner/SamzaSqlOperatorTable.java
@@ -28,7 +28,6 @@
 import org.apache.calcite.sql.SqlPrefixOperator;
 import org.apache.calcite.sql.SqlSpecialOperator;
 import org.apache.calcite.sql.fun.SqlArrayValueConstructor;
-import org.apache.calcite.sql.fun.SqlCoalesceFunction;
 import org.apache.calcite.sql.fun.SqlDatePartFunction;
 import org.apache.calcite.sql.fun.SqlMapValueConstructor;
 import org.apache.calcite.sql.fun.SqlMultisetQueryConstructor;
@@ -36,6 +35,7 @@
 import org.apache.calcite.sql.fun.SqlRowOperator;
 import org.apache.calcite.sql.fun.SqlStdOperatorTable;
 import org.apache.calcite.sql.util.ReflectiveSqlOperatorTable;
+import org.apache.samza.sql.udf.GetNestedField;
 
 
 /**
@@ -144,6 +144,7 @@
   public static final SqlFunction TUMBLE = SqlStdOperatorTable.TUMBLE;
   public static final SqlFunction TUMBLE_END = SqlStdOperatorTable.TUMBLE_END;
   public static final SqlFunction TUMBLE_START = SqlStdOperatorTable.TUMBLE_START;
+  public static final SqlFunction GET_NESTED_FIELD_OP = GetNestedField.INSTANCE;
 
   public SamzaSqlOperatorTable() {
     init();
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/planner/SamzaSqlScalarFunctionImpl.java b/samza-sql/src/main/java/org/apache/samza/sql/planner/SamzaSqlScalarFunctionImpl.java
index 21a48e9..4503370 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/planner/SamzaSqlScalarFunctionImpl.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/planner/SamzaSqlScalarFunctionImpl.java
@@ -86,7 +86,7 @@
       List<Expression> convertedOperands = new ArrayList<>();
       // SAMZA: 2230 To allow UDFS to accept Untyped arguments.
       // We explicitly Convert the untyped arguments to type that the UDf expects.
-      for(int index = 0; index < translatedOperands.size(); index++) {
+      for (int index = 0; index < translatedOperands.size(); index++) {
         if (!udfMetadata.isDisableArgCheck() && translatedOperands.get(index).type == Object.class
             && udfMethod.getParameters()[index].getType() != Object.class) {
           convertedOperands.add(Expressions.convert_(translatedOperands.get(index), udfMethod.getParameters()[index].getType()));
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/planner/SamzaSqlUdfOperatorTable.java b/samza-sql/src/main/java/org/apache/samza/sql/planner/SamzaSqlUdfOperatorTable.java
index eb215c8..1754721 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/planner/SamzaSqlUdfOperatorTable.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/planner/SamzaSqlUdfOperatorTable.java
@@ -29,6 +29,7 @@
 import org.apache.calcite.sql.SqlSyntax;
 import org.apache.calcite.sql.parser.SqlParserPos;
 import org.apache.calcite.sql.util.ListSqlOperatorTable;
+import org.apache.calcite.sql.validate.SqlNameMatcher;
 import org.apache.calcite.sql.validate.SqlUserDefinedFunction;
 import org.apache.samza.sql.interfaces.UdfMetadata;
 
@@ -45,7 +46,7 @@
     List<UdfMetadata> udfMetadataList = new ArrayList<>();
     scalarFunctions.forEach(samzaSqlScalarFunction -> {
       udfMetadataList.add(samzaSqlScalarFunction.getUdfMetadata());
-      });
+    });
     return scalarFunctions.stream().map(scalarFunction -> getSqlOperator(scalarFunction, udfMetadataList)).collect(Collectors.toList());
   }
 
@@ -53,31 +54,20 @@
     int numArguments = scalarFunction.numberOfArguments();
     UdfMetadata udfMetadata = scalarFunction.getUdfMetadata();
 
-    if(udfMetadata.isDisableArgCheck()) {
+    if (udfMetadata.isDisableArgCheck()) {
       return new SqlUserDefinedFunction(new SqlIdentifier(scalarFunction.getUdfName(), SqlParserPos.ZERO),
-          o -> scalarFunction.getReturnType(o.getTypeFactory()), null, Checker.ANY_CHECKER,
-          null, scalarFunction);
+        o -> scalarFunction.getReturnType(o.getTypeFactory()), null, Checker.ANY_CHECKER, null, scalarFunction);
     } else {
-      return new SqlUserDefinedFunction(
-              new SqlIdentifier(scalarFunction.getUdfName(),
-                                SqlParserPos.ZERO),
-          o -> scalarFunction.getReturnType(o.getTypeFactory()),
-          null,
-          Checker.getChecker(numArguments, numArguments, udfMetadata),
-          null,
-              scalarFunction);
+      return new SqlUserDefinedFunction(new SqlIdentifier(scalarFunction.getUdfName(), SqlParserPos.ZERO),
+        o -> scalarFunction.getReturnType(o.getTypeFactory()), null,
+        Checker.getChecker(numArguments, numArguments, udfMetadata), null, scalarFunction);
     }
   }
 
   @Override
   public void lookupOperatorOverloads(SqlIdentifier opName, SqlFunctionCategory category, SqlSyntax syntax,
-      List<SqlOperator> operatorList) {
-    SqlIdentifier upperCaseOpName = opName;
-    // Only udfs are case insensitive
-    if (category != null && category.equals(SqlFunctionCategory.USER_DEFINED_FUNCTION)) {
-      upperCaseOpName = new SqlIdentifier(opName.names.get(0).toUpperCase(), opName.getComponentParserPosition(0));
-    }
-    operatorTable.lookupOperatorOverloads(upperCaseOpName, category, syntax, operatorList);
+      List<SqlOperator> operatorList, SqlNameMatcher nameMatcher) {
+    operatorTable.lookupOperatorOverloads(opName, category, syntax, operatorList, nameMatcher);
   }
 
   @Override
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/planner/SamzaSqlValidator.java b/samza-sql/src/main/java/org/apache/samza/sql/planner/SamzaSqlValidator.java
index d0c51a1..f5a7e67 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/planner/SamzaSqlValidator.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/planner/SamzaSqlValidator.java
@@ -20,6 +20,7 @@
 package org.apache.samza.sql.planner;
 
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Scanner;
@@ -34,8 +35,8 @@
 import org.apache.calcite.rel.type.RelRecordType;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.sql.type.SqlTypeName;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.Validate;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.Validate;
 import org.apache.samza.SamzaException;
 import org.apache.samza.config.Config;
 import org.apache.samza.sql.data.SamzaSqlRelMessage;
@@ -69,10 +70,10 @@
    * @throws SamzaSqlValidatorException exception for sql validation
    */
   public void validate(List<String> sqlStmts) throws SamzaSqlValidatorException {
-    SamzaSqlApplicationConfig sqlConfig = SamzaSqlDslConverter.getSqlConfig(sqlStmts, config);
-    QueryPlanner planner = SamzaSqlDslConverter.getQueryPlanner(sqlConfig);
-
     for (String sql: sqlStmts) {
+      SamzaSqlApplicationConfig sqlConfig = SamzaSqlDslConverter.getSqlConfig(Collections.singletonList(sql), config);
+      QueryPlanner planner = SamzaSqlDslConverter.getQueryPlanner(sqlConfig);
+
       // we always pass only select query to the planner for samza sql. The reason is that samza sql supports
       // schema evolution where source and destination could up to an extent have independent schema evolution while
       // calcite expects strict conformance of the destination schema with that of the fields in the select query.
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/runner/SamzaSqlApplicationConfig.java b/samza-sql/src/main/java/org/apache/samza/sql/runner/SamzaSqlApplicationConfig.java
index 3d4047e..f98879c 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/runner/SamzaSqlApplicationConfig.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/runner/SamzaSqlApplicationConfig.java
@@ -35,8 +35,8 @@
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.RelRoot;
 import org.apache.calcite.rel.core.TableModify;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.Validate;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.Validate;
 import org.apache.samza.config.Config;
 import org.apache.samza.config.MapConfig;
 import org.apache.samza.sql.dsl.SamzaSqlDslConverter;
@@ -90,6 +90,7 @@
   public static final String CFG_METADATA_TOPIC_PREFIX = "samza.sql.metadataTopicPrefix";
   public static final String CFG_GROUPBY_WINDOW_DURATION_MS = "samza.sql.groupby.window.ms";
   public static final String CFG_SQL_PROCESS_SYSTEM_EVENTS = "samza.sql.processSystemEvents";
+  public static final String CFG_SQL_ENABLE_PLAN_OPTIMIZER = "samza.sql.enablePlanOptimizer";
 
   public static final String SAMZA_SYSTEM_LOG = "log";
 
@@ -117,6 +118,7 @@
   private final String metadataTopicPrefix;
   private final long windowDurationMs;
   private final boolean processSystemEvents;
+  private final boolean enableQueryPlanOptimizer;
 
   public SamzaSqlApplicationConfig(Config staticConfig, List<String> inputSystemStreams,
       List<String> outputSystemStreams) {
@@ -145,22 +147,22 @@
 
     relSchemaProvidersBySource = systemStreamConfigs.stream()
         .collect(Collectors.toMap(SqlIOConfig::getSource,
-            x -> initializePlugin("RelSchemaProvider", x.getRelSchemaProviderName(), staticConfig,
-                CFG_FMT_REL_SCHEMA_PROVIDER_DOMAIN,
-                (o, c) -> ((RelSchemaProviderFactory) o).create(x.getSystemStream(), c))));
+          x -> initializePlugin("RelSchemaProvider", x.getRelSchemaProviderName(), staticConfig,
+            CFG_FMT_REL_SCHEMA_PROVIDER_DOMAIN,
+            (o, c) -> ((RelSchemaProviderFactory) o).create(x.getSystemStream(), c))));
 
     samzaRelConvertersBySource = systemStreamConfigs.stream()
         .collect(Collectors.toMap(SqlIOConfig::getSource,
-            x -> initializePlugin("SamzaRelConverter", x.getSamzaRelConverterName(), staticConfig,
-                CFG_FMT_SAMZA_REL_CONVERTER_DOMAIN, (o, c) -> ((SamzaRelConverterFactory) o).create(x.getSystemStream(),
-                    relSchemaProvidersBySource.get(x.getSource()), c))));
+          x -> initializePlugin("SamzaRelConverter", x.getSamzaRelConverterName(), staticConfig,
+            CFG_FMT_SAMZA_REL_CONVERTER_DOMAIN, (o, c) -> ((SamzaRelConverterFactory) o).create(x.getSystemStream(),
+              relSchemaProvidersBySource.get(x.getSource()), c))));
 
     samzaRelTableKeyConvertersBySource = systemStreamConfigs.stream()
         .filter(SqlIOConfig::isRemoteTable)
         .collect(Collectors.toMap(SqlIOConfig::getSource,
-            x -> initializePlugin("SamzaRelTableKeyConverter", x.getSamzaRelTableKeyConverterName(),
-                staticConfig, CFG_FMT_SAMZA_REL_TABLE_KEY_CONVERTER_DOMAIN,
-                (o, c) -> ((SamzaRelTableKeyConverterFactory) o).create(x.getSystemStream(), c))));
+          x -> initializePlugin("SamzaRelTableKeyConverter", x.getSamzaRelTableKeyConverterName(),
+            staticConfig, CFG_FMT_SAMZA_REL_TABLE_KEY_CONVERTER_DOMAIN,
+            (o, c) -> ((SamzaRelTableKeyConverterFactory) o).create(x.getSystemStream(), c))));
 
     udfResolver = createUdfResolver(staticConfig);
     udfMetadata = udfResolver.getUdfs();
@@ -170,6 +172,7 @@
 
     processSystemEvents = staticConfig.getBoolean(CFG_SQL_PROCESS_SYSTEM_EVENTS, true);
     windowDurationMs = staticConfig.getLong(CFG_GROUPBY_WINDOW_DURATION_MS, DEFAULT_GROUPBY_WINDOW_DURATION_MS);
+    enableQueryPlanOptimizer = staticConfig.getBoolean(CFG_SQL_ENABLE_PLAN_OPTIMIZER, true);
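+    // Illustrative usage: setting "samza.sql.enablePlanOptimizer" to false in the job config disables the
+    // query plan optimizer; it defaults to true, as read above.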
   }
 
   public static <T> T initializePlugin(String pluginName, String plugin, Config staticConfig,
@@ -203,7 +206,7 @@
     Config newConfig = new MapConfig(Arrays.asList(config, metadataPrefixProperties));
     Validate.notEmpty(sourceResolveValue, "ioResolver config is not set or empty");
     return initializePlugin("SqlIOResolver", sourceResolveValue, newConfig, CFG_FMT_SOURCE_RESOLVER_DOMAIN,
-        (o, c) -> ((SqlIOResolverFactory) o).create(c, newConfig));
+      (o, c) -> ((SqlIOResolverFactory) o).create(c, newConfig));
   }
 
   private UdfResolver createUdfResolver(Map<String, String> config) {
@@ -282,7 +285,7 @@
         }
       }
     }
-     List<RelNode> relNodes = relNode.getInputs();
+    List<RelNode> relNodes = relNode.getInputs();
     if (relNodes == null || relNodes.isEmpty()) {
       return;
     }
@@ -336,4 +339,8 @@
   public boolean isProcessSystemEvents() {
     return processSystemEvents;
   }
+
+  public boolean isQueryPlanOptimizerEnabled() {
+    return enableQueryPlanOptimizer;
+  }
 }
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/runner/SamzaSqlApplicationRunner.java b/samza-sql/src/main/java/org/apache/samza/sql/runner/SamzaSqlApplicationRunner.java
index ce42bee..23d7be5 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/runner/SamzaSqlApplicationRunner.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/runner/SamzaSqlApplicationRunner.java
@@ -61,6 +61,7 @@
    * NOTE: This constructor is called from {@link ApplicationRunners} through reflection.
    * Please refrain from updating the signature or removing this constructor unless the caller has changed the interface.
    */
+  @SuppressWarnings("unused") /* used via reflection */
   public SamzaSqlApplicationRunner(SamzaApplication app, Config config) {
     this(app, false, config);
   }
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/serializers/SamzaSqlRelMessageSerdeFactory.java b/samza-sql/src/main/java/org/apache/samza/sql/serializers/SamzaSqlRelMessageSerdeFactory.java
index c3906bd..dc016f1 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/serializers/SamzaSqlRelMessageSerdeFactory.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/serializers/SamzaSqlRelMessageSerdeFactory.java
@@ -46,7 +46,7 @@
         ObjectMapper mapper = new ObjectMapper();
         // Enable object typing to handle nested records
         mapper.enableDefaultTyping();
-        return mapper.readValue(new String(bytes, "UTF-8"), new TypeReference<SamzaSqlRelMessage>() {});
+        return mapper.readValue(new String(bytes, "UTF-8"), new TypeReference<SamzaSqlRelMessage>() { });
       } catch (Exception e) {
         throw new SamzaException(e);
       }
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/serializers/SamzaSqlRelRecordSerdeFactory.java b/samza-sql/src/main/java/org/apache/samza/sql/serializers/SamzaSqlRelRecordSerdeFactory.java
index a78bcda..dd62bfc 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/serializers/SamzaSqlRelRecordSerdeFactory.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/serializers/SamzaSqlRelRecordSerdeFactory.java
@@ -46,7 +46,7 @@
         ObjectMapper mapper = new ObjectMapper();
         // Enable object typing to handle nested records
         mapper.enableDefaultTyping();
-        return mapper.readValue(new String(bytes, "UTF-8"), new TypeReference<SamzaSqlRelRecord>() {});
+        return mapper.readValue(new String(bytes, "UTF-8"), new TypeReference<SamzaSqlRelRecord>() { });
       } catch (Exception e) {
         throw new SamzaException(e);
       }
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/translator/FilterTranslator.java b/samza-sql/src/main/java/org/apache/samza/sql/translator/FilterTranslator.java
index 6515dc2..b42569c 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/translator/FilterTranslator.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/translator/FilterTranslator.java
@@ -80,6 +80,7 @@
       this.context = context;
       this.translatorContext = ((SamzaSqlApplicationContext) context.getApplicationTaskContext()).getTranslatorContexts().get(queryId);
       this.filter = (LogicalFilter) this.translatorContext.getRelNode(filterId);
+      LOG.info("Compiling Operator {}", filter.getDigest());
       this.expr = this.translatorContext.getExpressionCompiler().compile(filter.getInputs(), Collections.singletonList(filter.getCondition()));
       ContainerContext containerContext = context.getContainerContext();
       metricsRegistry = containerContext.getContainerMetricsRegistry();
@@ -96,14 +97,20 @@
     public boolean apply(SamzaSqlRelMessage message) {
       long startProcessing = System.nanoTime();
       Object[] result = new Object[1];
+      Object[] inputRow = ProjectTranslator.convertToJavaRow(message.getSamzaSqlRelRecord());
       try {
         expr.execute(translatorContext.getExecutionContext(), context, translatorContext.getDataContext(),
-            message.getSamzaSqlRelRecord().getFieldValues().toArray(), result);
+            inputRow, result);
       } catch (Exception e) {
         String errMsg = String.format("Handling the following rel message ran into an error. %s", message);
         LOG.error(errMsg, e);
         throw new SamzaException(errMsg, e);
       }
+      if (result[0] == null) {
+        // The filter was applied to a null value -> the result is neither true nor false (SQL UNKNOWN).
+        // The Samza filter operator supports primitive return types only, so return false per the current convention.
+        return false;
+      }
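+      // Illustrative example: for a filter like "WHERE amount > 10", a message with a null "amount" makes the
+      // compiled Calcite expression evaluate to null (SQL UNKNOWN), so the branch above drops the message.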
       if (result[0] instanceof Boolean) {
         boolean retVal = (Boolean) result[0];
         LOG.debug(
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/translator/JoinInputNode.java b/samza-sql/src/main/java/org/apache/samza/sql/translator/JoinInputNode.java
index d952194..c51ff25 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/translator/JoinInputNode.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/translator/JoinInputNode.java
@@ -20,14 +20,21 @@
 package org.apache.samza.sql.translator;
 
 import java.util.List;
+import java.util.Map;
+import org.apache.calcite.plan.hep.HepRelVertex;
 import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.TableScan;
+import org.apache.calcite.rel.logical.LogicalFilter;
+import org.apache.calcite.rel.logical.LogicalProject;
 import org.apache.samza.sql.interfaces.SqlIOConfig;
+import org.apache.samza.table.descriptors.CachingTableDescriptor;
+import org.apache.samza.table.descriptors.RemoteTableDescriptor;
 
 
 /**
  * This class represents the input node for the join. It can be either a table or a stream.
  */
-class JoinInputNode {
+public class JoinInputNode {
 
   // Calcite RelNode corresponding to the input
   private final RelNode relNode;
@@ -37,7 +44,7 @@
   private final InputType inputType;
   private final boolean isPosOnRight;
 
-  enum InputType {
+  public enum InputType {
     STREAM,
     LOCAL_TABLE,
     REMOTE_TABLE
@@ -67,10 +74,47 @@
   }
 
   String getSourceName() {
-    return SqlIOConfig.getSourceFromSourceParts(relNode.getTable().getQualifiedName());
+    return SqlIOConfig.getSourceFromSourceParts(getTableScan(relNode).getTable().getQualifiedName());
   }
 
   boolean isPosOnRight() {
     return isPosOnRight;
   }
+
+  public static JoinInputNode.InputType getInputType(
+      RelNode relNode, Map<String, SqlIOConfig> systemStreamConfigBySource) {
+
+    // NOTE: Any intermediate form of a join is always a stream. Eg: For the second level join of
+    // stream-table-table join, the left side of the join is join output, which we always
+    // assume to be a stream. The intermediate stream won't be an instance of TableScan.
+    // The join key(s) for the table could be a UDF, in which case the relNode would be a LogicalProject.
+
+    // If the relNode is a vertex in a DAG, get the real relNode. This happens due to query optimization.
+    if (relNode instanceof HepRelVertex) {
+      relNode = ((HepRelVertex) relNode).getCurrentRel();
+    }
+
+    if (relNode instanceof TableScan || relNode instanceof LogicalProject || relNode instanceof LogicalFilter) {
+      SqlIOConfig sourceTableConfig = JoinTranslator.resolveSQlIOForTable(relNode, systemStreamConfigBySource);
+      if (sourceTableConfig == null || !sourceTableConfig.getTableDescriptor().isPresent()) {
+        return JoinInputNode.InputType.STREAM;
+      } else if (sourceTableConfig.getTableDescriptor().get() instanceof RemoteTableDescriptor ||
+          sourceTableConfig.getTableDescriptor().get() instanceof CachingTableDescriptor) {
+        return JoinInputNode.InputType.REMOTE_TABLE;
+      } else {
+        return JoinInputNode.InputType.LOCAL_TABLE;
+      }
+    } else {
+      return JoinInputNode.InputType.STREAM;
+    }
+  }
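+  // Usage sketch (illustrative, hypothetical sources): for "stream s JOIN remote.t r ON ...", calling
+  // getInputType(join.getRight(), systemStreamConfigBySource) walks r's TableScan/Project/Filter chain and
+  // returns REMOTE_TABLE, while the stream side (or any intermediate join output) resolves to STREAM.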
+
+  private static TableScan getTableScan(RelNode relNode) {
+    if (relNode instanceof TableScan) {
+      return (TableScan) relNode;
+    }
+    // We only handle single-input filter/project nodes here.
+    assert relNode.getInputs().size() == 1;
+    return getTableScan(relNode.getInput(0));
+  }
 }
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/translator/JoinTranslator.java b/samza-sql/src/main/java/org/apache/samza/sql/translator/JoinTranslator.java
index 635d0ba..26d9fa0 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/translator/JoinTranslator.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/translator/JoinTranslator.java
@@ -20,12 +20,15 @@
 package org.apache.samza.sql.translator;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.LinkedList;
 import java.util.List;
-
-import org.apache.calcite.adapter.enumerable.EnumerableTableScan;
+import java.util.stream.Collectors;
+import java.util.Map;
 import org.apache.calcite.plan.RelOptUtil;
+import org.apache.calcite.plan.hep.HepRelVertex;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.JoinRelType;
 import org.apache.calcite.rel.core.TableScan;
@@ -34,12 +37,15 @@
 import org.apache.calcite.rel.logical.LogicalProject;
 import org.apache.calcite.rex.RexCall;
 import org.apache.calcite.rex.RexInputRef;
+import org.apache.calcite.rex.RexLiteral;
 import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexShuttle;
+import org.apache.calcite.rex.RexSlot;
 import org.apache.calcite.sql.SqlExplainFormat;
 import org.apache.calcite.sql.SqlExplainLevel;
 import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.type.SqlTypeName;
-import org.apache.commons.lang.Validate;
+import org.apache.commons.lang3.Validate;
 import org.apache.samza.SamzaException;
 import org.apache.samza.operators.KV;
 import org.apache.samza.operators.MessageStream;
@@ -51,13 +57,11 @@
 import org.apache.samza.sql.serializers.SamzaSqlRelMessageSerdeFactory;
 import org.apache.samza.sql.serializers.SamzaSqlRelRecordSerdeFactory;
 import org.apache.samza.table.Table;
-import org.apache.samza.table.descriptors.CachingTableDescriptor;
-import org.apache.samza.table.descriptors.RemoteTableDescriptor;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.samza.sql.data.SamzaSqlRelMessage.getSamzaSqlCompositeKeyFieldNames;
 import static org.apache.samza.sql.data.SamzaSqlRelMessage.createSamzaSqlCompositeKey;
+import static org.apache.samza.sql.data.SamzaSqlRelMessage.getSamzaSqlCompositeKeyFieldNames;
 
 
 /**
@@ -76,7 +80,7 @@
 class JoinTranslator {
 
   private static final Logger log = LoggerFactory.getLogger(JoinTranslator.class);
-  private String logicalOpId;
+  private final String logicalOpId;
   private final String intermediateStreamPrefix;
   private final int queryId;
   private final TranslatorInputMetricsMapFunction inputMetricsMF;
@@ -91,14 +95,16 @@
   }
 
   void translate(final LogicalJoin join, final TranslatorContext translatorContext) {
-    JoinInputNode.InputType inputTypeOnLeft = getInputType(join.getLeft(), translatorContext);
-    JoinInputNode.InputType inputTypeOnRight = getInputType(join.getRight(), translatorContext);
+    JoinInputNode.InputType inputTypeOnLeft = JoinInputNode.getInputType(join.getLeft(),
+        translatorContext.getExecutionContext().getSamzaSqlApplicationConfig().getInputSystemStreamConfigBySource());
+    JoinInputNode.InputType inputTypeOnRight = JoinInputNode.getInputType(join.getRight(),
+        translatorContext.getExecutionContext().getSamzaSqlApplicationConfig().getInputSystemStreamConfigBySource());
 
     // Do the validation of join query
     validateJoinQuery(join, inputTypeOnLeft, inputTypeOnRight);
 
     // At this point, one of the sides is a table. Let's figure out if it is on left or right side.
-    boolean isTablePosOnRight = (inputTypeOnRight != JoinInputNode.InputType.STREAM);
+    boolean isTablePosOnRight = inputTypeOnRight != JoinInputNode.InputType.STREAM;
 
     // stream and table keyIds are used to extract the join condition field (key) names and values out of the stream
     // and table records.
@@ -106,8 +112,26 @@
     List<Integer> tableKeyIds = new LinkedList<>();
 
     // Fetch the stream and table indices corresponding to the fields given in the join condition.
-    populateStreamAndTableKeyIds(((RexCall) join.getCondition()).getOperands(), join, isTablePosOnRight, streamKeyIds,
-        tableKeyIds);
+
+    final int leftSideSize = join.getLeft().getRowType().getFieldCount();
+    final int tableStartIdx = isTablePosOnRight ? leftSideSize : 0;
+    final int streamStartIdx = isTablePosOnRight ? 0 : leftSideSize;
+    final int tableEndIdx = isTablePosOnRight ? join.getRowType().getFieldCount() : leftSideSize;
+    join.getCondition().accept(new RexShuttle() {
+      @Override
+      public RexNode visitInputRef(RexInputRef inputRef) {
+        validateJoinKeyType(inputRef); // Validate the type of the input ref.
+        int index = inputRef.getIndex();
+        if (index >= tableStartIdx && index < tableEndIdx) {
+          tableKeyIds.add(index - tableStartIdx);
+        } else {
+          streamKeyIds.add(index - streamStartIdx);
+        }
+        return inputRef;
+      }
+    });
+    Collections.sort(tableKeyIds);
+    Collections.sort(streamKeyIds);
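+    // Illustrative example: for "stream s JOIN table t ON s.a = t.__key__" with the table on the right,
+    // the shuttle above records the index of s.a in streamKeyIds and the offset-adjusted index of t.__key__
+    // in tableKeyIds; both lists are then sorted so the key ordering is deterministic.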
 
     // Get the two input nodes (stream and table nodes) for the join.
     JoinInputNode streamNode = new JoinInputNode(isTablePosOnRight ? join.getLeft() : join.getRight(), streamKeyIds,
@@ -142,8 +166,16 @@
 
     if (tableNode.isRemoteTable()) {
       String remoteTableName = tableNode.getSourceName();
-      StreamTableJoinFunction joinFn = new SamzaSqlRemoteTableJoinFunction(context.getMsgConverter(remoteTableName),
-          context.getTableKeyConverter(remoteTableName), streamNode, tableNode, join.getJoinType(), queryId);
+      MessageStream<SamzaSqlRelMessage> operatorStack = context.getMessageStream(tableNode.getRelNode().getId());
+      final StreamTableJoinFunction<Object, SamzaSqlRelMessage, KV, SamzaSqlRelMessage> joinFn;
+      if (operatorStack instanceof MessageStreamCollector) {
+        joinFn = new SamzaSqlRemoteTableJoinFunction(context.getMsgConverter(remoteTableName),
+            context.getTableKeyConverter(remoteTableName), streamNode, tableNode, join.getJoinType(), queryId,
+            (MessageStreamCollector) operatorStack);
+      } else {
+        joinFn = new SamzaSqlRemoteTableJoinFunction(context.getMsgConverter(remoteTableName),
+            context.getTableKeyConverter(remoteTableName), streamNode, tableNode, join.getJoinType(), queryId);
+      }
 
       return inputStream
           .map(inputMetricsMF)
@@ -152,7 +184,11 @@
 
     // Join with the local table
 
-    StreamTableJoinFunction joinFn = new SamzaSqlLocalTableJoinFunction(streamNode, tableNode, join.getJoinType());
+    StreamTableJoinFunction<SamzaSqlRelRecord,
+        SamzaSqlRelMessage,
+        KV<SamzaSqlRelRecord, SamzaSqlRelMessage>,
+        SamzaSqlRelMessage>
+        joinFn = new SamzaSqlLocalTableJoinFunction(streamNode, tableNode, join.getJoinType());
 
     SamzaSqlRelRecordSerdeFactory.SamzaSqlRelRecordSerde keySerde =
         (SamzaSqlRelRecordSerdeFactory.SamzaSqlRelRecordSerde) new SamzaSqlRelRecordSerdeFactory().getSerde(null, null);
@@ -181,8 +217,8 @@
       throw new SamzaException("Query with only INNER and LEFT/RIGHT OUTER join are supported.");
     }
 
-    boolean isTablePosOnLeft = (inputTypeOnLeft != JoinInputNode.InputType.STREAM);
-    boolean isTablePosOnRight = (inputTypeOnRight != JoinInputNode.InputType.STREAM);
+    boolean isTablePosOnLeft = inputTypeOnLeft != JoinInputNode.InputType.STREAM;
+    boolean isTablePosOnRight = inputTypeOnRight != JoinInputNode.InputType.STREAM;
 
     if (!isTablePosOnLeft && !isTablePosOnRight) {
       throw new SamzaException("Invalid query with both sides of join being denoted as 'stream'. "
@@ -194,95 +230,139 @@
           dumpRelPlanForNode(join));
     }
 
-    if (joinRelType.compareTo(JoinRelType.LEFT) == 0 && isTablePosOnLeft && !isTablePosOnRight) {
+    if (joinRelType.compareTo(JoinRelType.LEFT) == 0 && isTablePosOnLeft) {
       throw new SamzaException("Invalid query for outer left join. Left side of the join should be a 'stream' and "
           + "right side of join should be a 'table'. " + dumpRelPlanForNode(join));
     }
 
-    if (joinRelType.compareTo(JoinRelType.RIGHT) == 0 && isTablePosOnRight && !isTablePosOnLeft) {
+    if (joinRelType.compareTo(JoinRelType.RIGHT) == 0 && isTablePosOnRight) {
       throw new SamzaException("Invalid query for outer right join. Left side of the join should be a 'table' and "
           + "right side of join should be a 'stream'. " + dumpRelPlanForNode(join));
     }
 
-    validateJoinCondition(join.getCondition());
-  }
+    final List<RexNode> conjunctionList = new ArrayList<>();
+    decomposeAndValidateConjunction(join.getCondition(), conjunctionList);
 
-  private void validateJoinCondition(RexNode operand) {
-    if (!(operand instanceof RexCall)) {
-      throw new SamzaException("SQL Query is not supported. Join condition operand " + operand +
-          " is of type " + operand.getClass());
-    }
-
-    RexCall condition = (RexCall) operand;
-
-    if (condition.isAlwaysTrue()) {
+    if (conjunctionList.isEmpty()) {
       throw new SamzaException("Query results in a cross join, which is not supported. Please optimize the query."
           + " It is expected that the joins should include JOIN ON operator in the sql query.");
     }
+    // TODO: Not sure why we cannot allow a literal as part of the join condition; will revisit this separately.
+    conjunctionList.forEach(rexNode -> rexNode.accept(new RexShuttle() {
+      @Override
+      public RexNode visitLiteral(RexLiteral literal) {
+        throw new SamzaException(
+            "Join condition must not contain the literal " + literal.toString() + " in join node " + join.getDigest());
+      }
+    }));
+    final JoinInputNode.InputType rootTableInput = isTablePosOnRight ? inputTypeOnRight : inputTypeOnLeft;
+    if (rootTableInput.compareTo(JoinInputNode.InputType.REMOTE_TABLE) != 0) {
+      // It is not a remote table, so all is good; we do not have to validate the project on the key column.
+      return;
+    }
 
-    if (condition.getKind() != SqlKind.EQUALS && condition.getKind() != SqlKind.AND) {
+    /*
+    For a remote table we need to validate the join condition and the project that sits above the remote table scan.
+     - As of today, the condition needs to be exactly one equi-join using the __key__ column (see SAMZA-2554).
+     - The project on top of the remote table has to contain only simple input references to the columns used in the join.
+    */
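+    // Illustrative example (hypothetical sources): "... FROM stream s JOIN remote.t r ON s.id = r.__key__" passes
+    // this validation, while "ON s.id = r.__key__ AND s.x = r.other" is rejected below because only a single
+    // equality on the remote table's __key__ column is allowed today.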
+
+    // First, let's collect the refs of the columns used by the join condition.
+    List<RexInputRef> refCollector = new ArrayList<>();
+    join.getCondition().accept(new RexShuttle() {
+      @Override
+      public RexNode visitInputRef(RexInputRef inputRef) {
+        refCollector.add(inputRef);
+        return inputRef;
+      }
+    });
+    // start index of the Remote table within the Join Row
+    final int tableStartIndex = isTablePosOnRight ? join.getLeft().getRowType().getFieldCount() : 0;
+    // end index of the Remote table within the Join Row
+    final int tableEndIndex =
+        isTablePosOnRight ? join.getRowType().getFieldCount() : join.getLeft().getRowType().getFieldCount();
+
+    List<Integer> tableRefsIdx = refCollector.stream()
+        .map(RexSlot::getIndex)
+        .filter(x -> (tableStartIndex <= x) && (x < tableEndIndex)) // collect all the refs from the table side
+        .map(x -> x - tableStartIndex) // re-adjust the offset
+        .sorted()
+        .collect(Collectors.toList()); // we now have a list of all the table-side refs, with 0-based indices.
+
+    // Validate that the condition contains a ref to the remote table's primary key (__key__) column.
+
+    if (conjunctionList.size() != 1 || tableRefsIdx.size() != 1) {
+      // TODO: We can relax this by allowing another filter to be evaluated post-lookup; see SAMZA-2554.
+      throw new SamzaException(
+          "Invalid query: the join condition must contain exactly one predicate on the remote table's __key__ column "
+              + dumpRelPlanForNode(join));
+    }
+
+    // Validate the project: follow each input and ensure that it is a simple ref with no RexCall in the way.
+    if (!isValidRemoteJoinRef(tableRefsIdx.get(0), isTablePosOnRight ? join.getRight() : join.getLeft())) {
+      throw new SamzaException("Invalid query for join condition can not have an expression and must be reference "
+          + SamzaSqlRelMessage.KEY_NAME + " column " + dumpRelPlanForNode(join));
+    }
+  }
+
+  /**
+   * Helper method to check whether the join condition can be evaluated by the remote table.
+   * It follows a single path, using the input ref index, and checks that it remains a simple reference all the way
+   * down to the table scan. If any RexCall is encountered along the way, the check stops and returns false.
+   *
+   * @param inputRexIndex rex ref index
+   * @param relNode current RelNode
+   * @return false if any relational expression is encountered on the path, true if it is a simple ref to the __key__ column.
+   */
+  private static boolean isValidRemoteJoinRef(int inputRexIndex, RelNode relNode) {
+    if (relNode instanceof TableScan) {
+      return relNode.getRowType().getFieldList().get(inputRexIndex).getName().equals(SamzaSqlRelMessage.KEY_NAME);
+    }
+    // Has to be a single-input rel node of kind filter/project/table scan.
+    Preconditions.checkState(relNode.getInputs().size() == 1,
+        "Expected a single-input RelNode but got " + relNode.getDigest());
+    if (relNode instanceof LogicalFilter) {
+      return isValidRemoteJoinRef(inputRexIndex, relNode.getInput(0));
+    }
+    RexNode inputRef = ((LogicalProject) relNode).getProjects().get(inputRexIndex);
+    if (inputRef instanceof RexCall) {
+      return false; // We cannot push any expression as of now; stop and return false.
+    }
+    return isValidRemoteJoinRef(((RexInputRef) inputRef).getIndex(), relNode.getInput(0));
+  }
+
+  /**
+   * Traverses the expression tree and validates it. The only allowed predicate is a conjunction of exp1 = exp2.
+   * @param rexPredicate rex condition
+   * @param conjunctionList result container used to pull results from the recursion stack.
+   */
+  public static void decomposeAndValidateConjunction(RexNode rexPredicate, List<RexNode> conjunctionList) {
+    if (rexPredicate == null || rexPredicate.isAlwaysTrue()) {
+      return;
+    }
+
+    if (rexPredicate.isA(SqlKind.AND)) {
+      for (RexNode operand : ((RexCall) rexPredicate).getOperands()) {
+        decomposeAndValidateConjunction(operand, conjunctionList);
+      }
+    } else if (rexPredicate.isA(SqlKind.EQUALS)) {
+      conjunctionList.add(rexPredicate);
+    } else {
       throw new SamzaException("Only equi-joins and AND operator is supported in join condition.");
     }
   }
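+  // Illustrative decomposition: "a = b AND c = d" yields the conjunction list [a = b, c = d], while
+  // "a = b OR c = d" (or any non-equality predicate) throws, since only conjunctions of equalities are accepted.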
 
-  // Fetch the stream and table key indices corresponding to the fields given in the join condition by parsing through
-  // the condition. Stream and table key indices are populated in streamKeyIds and tableKeyIds respectively.
-  private void populateStreamAndTableKeyIds(List<RexNode> operands, final LogicalJoin join, boolean isTablePosOnRight,
-      List<Integer> streamKeyIds, List<Integer> tableKeyIds) {
-
-    // All non-leaf operands in the join condition should be expressions.
-    if (operands.get(0) instanceof RexCall) {
-      operands.forEach(operand -> {
-        validateJoinCondition(operand);
-        populateStreamAndTableKeyIds(((RexCall) operand).getOperands(), join, isTablePosOnRight, streamKeyIds, tableKeyIds);
-      });
-      return;
-    }
-
-    // We are at the leaf of the join condition. Only binary operators are supported.
-    Validate.isTrue(operands.size() == 2);
-
-    // Only reference operands are supported in row expressions and not constants.
-    // a.key = b.key is supported with a.key and b.key being reference operands.
-    // a.key = "constant" is not yet supported.
-    if (!(operands.get(0) instanceof RexInputRef) || !(operands.get(1) instanceof RexInputRef)) {
-      throw new SamzaException("SQL query is not supported. Join condition " + join.getCondition() + " should have "
-          + "reference operands but the types are " + operands.get(0).getClass() + " and " + operands.get(1).getClass());
-    }
-
-    // Join condition is commutative, meaning, a.key = b.key is equivalent to b.key = a.key.
-    // Calcite assigns the indices to the fields based on the order a and b are specified in
-    // the sql 'from' clause. Let's put the operand with smaller index in leftRef and larger
-    // index in rightRef so that the order of operands in the join condition is in the order
-    // the stream and table are specified in the 'from' clause.
-
-    RexInputRef leftRef = (RexInputRef) operands.get(0);
-    RexInputRef rightRef = (RexInputRef) operands.get(1);
-
-    // Let's validate the key used in the join condition.
-    validateJoinKeys(leftRef);
-    validateJoinKeys(rightRef);
-
-    if (leftRef.getIndex() > rightRef.getIndex()) {
-      RexInputRef tmpRef = leftRef;
-      leftRef = rightRef;
-      rightRef = tmpRef;
-    }
-
-    // Get the table key index and stream key index
-    int deltaKeyIdx = rightRef.getIndex() - join.getLeft().getRowType().getFieldCount();
-    streamKeyIds.add(isTablePosOnRight ? leftRef.getIndex() : deltaKeyIdx);
-    tableKeyIds.add(isTablePosOnRight ? deltaKeyIdx : leftRef.getIndex());
-  }
-
-  private void validateJoinKeys(RexInputRef ref) {
+  private void validateJoinKeyType(RexInputRef ref) {
     SqlTypeName sqlTypeName = ref.getType().getSqlTypeName();
 
-    // Primitive types and ANY (for the record key) are supported in the key
+    // Primitive types, ROW (for the record key) and ANY (for other fields like __key__) are supported as join keys.
+    // TODO: this needs to be pulled into a common class/place that holds all the supported types.
     if (sqlTypeName != SqlTypeName.BOOLEAN && sqlTypeName != SqlTypeName.TINYINT && sqlTypeName != SqlTypeName.SMALLINT
         && sqlTypeName != SqlTypeName.INTEGER && sqlTypeName != SqlTypeName.CHAR && sqlTypeName != SqlTypeName.BIGINT
         && sqlTypeName != SqlTypeName.VARCHAR && sqlTypeName != SqlTypeName.DOUBLE && sqlTypeName != SqlTypeName.FLOAT
-        && sqlTypeName != SqlTypeName.ANY && sqlTypeName != SqlTypeName.OTHER) {
+        && sqlTypeName != SqlTypeName.ANY && sqlTypeName != SqlTypeName.OTHER && sqlTypeName != SqlTypeName.ROW) {
       log.error("Unsupported key type " + sqlTypeName + " used in join condition.");
       throw new SamzaException("Unsupported key type used in join condition.");
     }
@@ -294,14 +374,19 @@
         SqlExplainLevel.EXPPLAN_ATTRIBUTES);
   }
 
-  private SqlIOConfig resolveSQlIOForTable(RelNode relNode, TranslatorContext context) {
+  static SqlIOConfig resolveSQlIOForTable(RelNode relNode, Map<String, SqlIOConfig> systemStreamConfigBySource) {
     // Let's recursively get to the TableScan node to identify IO for the table.
+
+    if (relNode instanceof HepRelVertex) {
+      return resolveSQlIOForTable(((HepRelVertex) relNode).getCurrentRel(), systemStreamConfigBySource);
+    }
+
     if (relNode instanceof LogicalProject) {
-      return resolveSQlIOForTable(((LogicalProject) relNode).getInput(), context);
+      return resolveSQlIOForTable(((LogicalProject) relNode).getInput(), systemStreamConfigBySource);
     }
 
     if (relNode instanceof LogicalFilter) {
-      return resolveSQlIOForTable(((LogicalFilter) relNode).getInput(), context);
+      return resolveSQlIOForTable(((LogicalFilter) relNode).getInput(), systemStreamConfigBySource);
     }
 
     // We return null for table IO as the table seems to be involved in another join. The output of stream-table join
@@ -316,39 +401,17 @@
     }
 
     String sourceName = SqlIOConfig.getSourceFromSourceParts(relNode.getTable().getQualifiedName());
-    SqlIOConfig sourceConfig =
-        context.getExecutionContext().getSamzaSqlApplicationConfig().getInputSystemStreamConfigBySource().get(sourceName);
+    SqlIOConfig sourceConfig = systemStreamConfigBySource.get(sourceName);
     if (sourceConfig == null) {
       throw new SamzaException("Unsupported source found in join statement: " + sourceName);
     }
     return sourceConfig;
   }
 
-  private JoinInputNode.InputType getInputType(RelNode relNode, TranslatorContext context) {
-
-    // NOTE: Any intermediate form of a join is always a stream. Eg: For the second level join of
-    // stream-table-table join, the left side of the join is join output, which we always
-    // assume to be a stream. The intermediate stream won't be an instance of EnumerableTableScan.
-    // The join key(s) for the table could be an udf in which case the relNode would be LogicalProject.
-
-    if (relNode instanceof EnumerableTableScan || relNode instanceof LogicalProject) {
-      SqlIOConfig sourceTableConfig = resolveSQlIOForTable(relNode, context);
-      if (sourceTableConfig == null || !sourceTableConfig.getTableDescriptor().isPresent()) {
-        return JoinInputNode.InputType.STREAM;
-      } else if (sourceTableConfig.getTableDescriptor().get() instanceof RemoteTableDescriptor ||
-          sourceTableConfig.getTableDescriptor().get() instanceof CachingTableDescriptor) {
-        return JoinInputNode.InputType.REMOTE_TABLE;
-      } else {
-        return JoinInputNode.InputType.LOCAL_TABLE;
-      }
-    } else {
-      return JoinInputNode.InputType.STREAM;
-    }
-  }
-
   private Table getTable(JoinInputNode tableNode, TranslatorContext context) {
 
-    SqlIOConfig sourceTableConfig = resolveSQlIOForTable(tableNode.getRelNode(), context);
+    SqlIOConfig sourceTableConfig = resolveSQlIOForTable(tableNode.getRelNode(),
+        context.getExecutionContext().getSamzaSqlApplicationConfig().getInputSystemStreamConfigBySource());
 
     if (sourceTableConfig == null || !sourceTableConfig.getTableDescriptor().isPresent()) {
       String errMsg = "Failed to resolve table source in join operation: node=" + tableNode.getRelNode();
@@ -392,9 +455,13 @@
   }
 
   @VisibleForTesting
-  public TranslatorInputMetricsMapFunction getInputMetricsMF() { return this.inputMetricsMF; }
+  public TranslatorInputMetricsMapFunction getInputMetricsMF() {
+    return this.inputMetricsMF;
+  }
 
   @VisibleForTesting
-  public TranslatorOutputMetricsMapFunction getOutputMetricsMF() { return this.outputMetricsMF; }
+  public TranslatorOutputMetricsMapFunction getOutputMetricsMF() {
+    return this.outputMetricsMF;
+  }
 
 }
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/translator/LogicalAggregateTranslator.java b/samza-sql/src/main/java/org/apache/samza/sql/translator/LogicalAggregateTranslator.java
index 10dbb2e..78fd6aa 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/translator/LogicalAggregateTranslator.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/translator/LogicalAggregateTranslator.java
@@ -76,19 +76,19 @@
                 .setAccumulationMode(
                     AccumulationMode.DISCARDING), changeLogStorePrefix + "_tumblingWindow_" + logicalOpId)
             .map(windowPane -> {
-                List<String> fieldNames = windowPane.getKey().getKey().getSamzaSqlRelRecord().getFieldNames();
-                List<Object> fieldValues = windowPane.getKey().getKey().getSamzaSqlRelRecord().getFieldValues();
-                fieldNames.add(aggFieldNames.get(0));
-                fieldValues.add(windowPane.getMessage());
-                return new SamzaSqlRelMessage(fieldNames, fieldValues, new SamzaSqlRelMsgMetadata(0L, 0L));
-              });
+              List<String> fieldNames = windowPane.getKey().getKey().getSamzaSqlRelRecord().getFieldNames();
+              List<Object> fieldValues = windowPane.getKey().getKey().getSamzaSqlRelRecord().getFieldValues();
+              fieldNames.add(aggFieldNames.get(0));
+              fieldValues.add(windowPane.getMessage());
+              return new SamzaSqlRelMessage(fieldNames, fieldValues, new SamzaSqlRelMsgMetadata(0L, 0L));
+            });
     context.registerMessageStream(aggregate.getId(), outputStream);
     outputStream.map(new TranslatorOutputMetricsMapFunction(logicalOpId));
   }
 
   private ArrayList<String> getAggFieldNames(LogicalAggregate aggregate) {
     return aggregate.getAggCallList().stream().collect(ArrayList::new, (names, aggCall) -> names.add(aggCall.getName()),
-        (n1, n2) -> n1.addAll(n2));
+      (n1, n2) -> n1.addAll(n2));
   }
 
   void validateAggregateFunctions(final LogicalAggregate aggregate) {
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/translator/MessageStreamCollector.java b/samza-sql/src/main/java/org/apache/samza/sql/translator/MessageStreamCollector.java
new file mode 100644
index 0000000..0ddc136
--- /dev/null
+++ b/samza-sql/src/main/java/org/apache/samza/sql/translator/MessageStreamCollector.java
@@ -0,0 +1,214 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.samza.sql.translator;
+
+import java.io.Closeable;
+import java.io.Serializable;
+import java.time.Duration;
+import java.util.ArrayDeque;
+import java.util.Collection;
+import java.util.Deque;
+import java.util.function.Function;
+import org.apache.samza.context.Context;
+import org.apache.samza.operators.KV;
+import org.apache.samza.operators.MessageStream;
+import org.apache.samza.operators.OutputStream;
+import org.apache.samza.operators.functions.AsyncFlatMapFunction;
+import org.apache.samza.operators.functions.FilterFunction;
+import org.apache.samza.operators.functions.FlatMapFunction;
+import org.apache.samza.operators.functions.JoinFunction;
+import org.apache.samza.operators.functions.MapFunction;
+import org.apache.samza.operators.functions.SinkFunction;
+import org.apache.samza.operators.functions.StreamTableJoinFunction;
+import org.apache.samza.operators.windows.Window;
+import org.apache.samza.operators.windows.WindowPane;
+import org.apache.samza.serializers.KVSerde;
+import org.apache.samza.serializers.Serde;
+import org.apache.samza.sql.data.SamzaSqlRelMessage;
+import org.apache.samza.table.Table;
+
+
+/**
+ * Collects the map and filter Samza functions that sit on top of a remote table scan.
+ * The join operator holds on to this collector and triggers the collected call stack when applying the join function post-lookup.
+ *
+ * Note that this is needed because a remote table cannot expose a proper {@code MessageStream}.
+ * It is a workaround to minimize the amount of code changes to the current query translator {@link org.apache.samza.sql.translator.QueryTranslator};
+ * in an ideal world we would use the Calcite planner in the conventional way and combine the functions via translation of RelNodes.
+ */
+class MessageStreamCollector implements MessageStream<SamzaSqlRelMessage>, Serializable, Closeable {
+
+  /**
+   * Queue of the operators sitting on top of the remote table scan, in first-in, first-fired order.
+   */
+  private final Deque<MapFunction<? super SamzaSqlRelMessage, ? extends SamzaSqlRelMessage>> mapFnCallQueue =
+      new ArrayDeque<>();
+
+  /**
+   * Function to chain the call to close from each operator.
+   */
+  private transient Function<Void, Void> closeFn = aVoid -> null;
+
+  @Override
+  public <OM> MessageStream<OM> map(MapFunction<? super SamzaSqlRelMessage, ? extends OM> mapFn) {
+    mapFnCallQueue.offer((MapFunction<? super SamzaSqlRelMessage, ? extends SamzaSqlRelMessage>) mapFn);
+    return (MessageStream<OM>) this;
+  }
+
+  @Override
+  public MessageStream<SamzaSqlRelMessage> filter(FilterFunction<? super SamzaSqlRelMessage> filterFn) {
+    mapFnCallQueue.offer(new FilterMapAdapter(filterFn));
+    return this;
+  }
+
+  /**
+   * Called by the join operator at run time to apply the filters and projects after the join lookup.
+   *
+   * @param context Samza execution context
+   * @return a function that yields {@code null} when a filter rejects the row, otherwise the Samza relational record after it has gone through the projects.
+   */
+  Function<SamzaSqlRelMessage, SamzaSqlRelMessage> getFunction(Context context) {
+    Function<SamzaSqlRelMessage, SamzaSqlRelMessage> tailFn = null;
+    Function<Void, Void> intFn = aVoid -> null; // Projects and Filters both need to be initialized.
+    closeFn = aVoid -> null;
+    // At this point we have the queue of operators, where the first one in is the first operator on top of the TableScan.
+    while (!mapFnCallQueue.isEmpty()) {
+      MapFunction<? super SamzaSqlRelMessage, ? extends SamzaSqlRelMessage> f = mapFnCallQueue.poll();
+      intFn = intFn.andThen((aVoid) -> {
+        f.init(context);
+        return null;
+      });
+      // Re-assign closeFn: andThen returns a new composed function rather than mutating the receiver.
+      closeFn = closeFn.andThen((aVoid) -> {
+        f.close();
+        return null;
+      });
+
+      Function<SamzaSqlRelMessage, SamzaSqlRelMessage> current = x -> x == null ? null : f.apply(x);
+      if (tailFn == null) {
+        tailFn = current;
+      } else {
+        tailFn = current.compose(tailFn);
+      }
+    }
+    // TODO: Not entirely sure about this; need to check whether the Samza framework is okay with a late init call.
+    intFn.apply(null); // Init call has to happen here.
+    return tailFn == null ? Function.identity() : tailFn;
+  }
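+  // Usage sketch (illustrative; variable names are hypothetical): the translator registers this collector as the
+  // "message stream" for a remote table scan, so project/filter translators call map()/filter() on it. At join
+  // time the join function replays the collected chain, e.g.:
+  //   Function<SamzaSqlRelMessage, SamzaSqlRelMessage> postLookup = collector.getFunction(context);
+  //   SamzaSqlRelMessage result = postLookup.apply(lookedUpRow); // null when a filter rejects the row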
+
+  /**
+   * Filter adapter used to compose filters as a {@code MapFunction<SamzaSqlRelMessage, SamzaSqlRelMessage>}.
+   * The adapted function returns {@code null} when the input is {@code null} or the filter condition rejects the current row.
+   */
+  private static class FilterMapAdapter implements MapFunction<SamzaSqlRelMessage, SamzaSqlRelMessage> {
+    private final FilterFunction<? super SamzaSqlRelMessage> filterFn;
+
+    private FilterMapAdapter(FilterFunction<? super SamzaSqlRelMessage> filterFn) {
+      this.filterFn = filterFn;
+    }
+
+    @Override
+    public SamzaSqlRelMessage apply(SamzaSqlRelMessage message) {
+      if (message != null && filterFn.apply(message)) {
+        return message;
+      }
+      // Return null when there is no match.
+      return null;
+    }
+
+    @Override
+    public void close() {
+      filterFn.close();
+    }
+
+    @Override
+    public void init(Context context) {
+      filterFn.init(context);
+    }
+  }
+
+  @Override
+  public void close() {
+    if (closeFn != null) {
+      closeFn.apply(null);
+    }
+  }
+
+  @Override
+  public <OM> MessageStream<OM> flatMap(FlatMapFunction<? super SamzaSqlRelMessage, ? extends OM> flatMapFn) {
+    return null;
+  }
+
+  @Override
+  public <OM> MessageStream<OM> flatMapAsync(
+      AsyncFlatMapFunction<? super SamzaSqlRelMessage, ? extends OM> asyncFlatMapFn) {
+    return null;
+  }
+
+  @Override
+  public void sink(SinkFunction<? super SamzaSqlRelMessage> sinkFn) {
+    throw new IllegalStateException("Not valid state");
+  }
+
+  @Override
+  public MessageStream<SamzaSqlRelMessage> sendTo(OutputStream<SamzaSqlRelMessage> outputStream) {
+    throw new IllegalStateException("Not valid state");
+  }
+
+  @Override
+  public <K, WV> MessageStream<WindowPane<K, WV>> window(Window<SamzaSqlRelMessage, K, WV> window, String id) {
+    throw new IllegalStateException("Not valid state");
+  }
+
+  @Override
+  public <K, OM, JM> MessageStream<JM> join(MessageStream<OM> otherStream,
+      JoinFunction<? extends K, ? super SamzaSqlRelMessage, ? super OM, ? extends JM> joinFn, Serde<K> keySerde,
+      Serde<SamzaSqlRelMessage> messageSerde, Serde<OM> otherMessageSerde, Duration ttl, String id) {
+    throw new IllegalStateException("Not valid state");
+  }
+
+  @Override
+  public <K, R extends KV, JM> MessageStream<JM> join(Table<R> table,
+      StreamTableJoinFunction<? extends K, ? super SamzaSqlRelMessage, ? super R, ? extends JM> joinFn,
+      Object... args) {
+    throw new IllegalStateException("Not valid state");
+  }
+
+  @Override
+  public MessageStream<SamzaSqlRelMessage> merge(
+      Collection<? extends MessageStream<? extends SamzaSqlRelMessage>> otherStreams) {
+    throw new IllegalStateException("Not valid state");
+  }
+
+  @Override
+  public <K, V> MessageStream<KV<K, V>> partitionBy(MapFunction<? super SamzaSqlRelMessage, ? extends K> keyExtractor,
+      MapFunction<? super SamzaSqlRelMessage, ? extends V> valueExtractor, KVSerde<K, V> serde, String id) {
+    throw new IllegalStateException("Not valid state");
+  }
+
+  @Override
+  public <K, V> MessageStream<KV<K, V>> sendTo(Table<KV<K, V>> table, Object... args) {
+    throw new IllegalStateException("Not valid state");
+  }
+
+  @Override
+  public MessageStream<SamzaSqlRelMessage> broadcast(Serde<SamzaSqlRelMessage> serde, String id) {
+    throw new IllegalStateException("Not valid state");
+  }
+}
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/translator/ProjectTranslator.java b/samza-sql/src/main/java/org/apache/samza/sql/translator/ProjectTranslator.java
index bf44815..16a320e 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/translator/ProjectTranslator.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/translator/ProjectTranslator.java
@@ -19,12 +19,15 @@
 
 package org.apache.samza.sql.translator;
 
+import com.google.common.base.Preconditions;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
+import org.apache.calcite.DataContext;
 import org.apache.calcite.rel.core.Project;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rex.RexCall;
@@ -38,7 +41,9 @@
 import org.apache.samza.metrics.SamzaHistogram;
 import org.apache.samza.operators.MessageStream;
 import org.apache.samza.operators.functions.MapFunction;
+import org.apache.samza.sql.SamzaSqlRelRecord;
 import org.apache.samza.sql.data.Expression;
+import org.apache.samza.sql.data.SamzaSqlExecutionContext;
 import org.apache.samza.sql.data.SamzaSqlRelMessage;
 import org.apache.samza.sql.data.SamzaSqlRelMsgMetadata;
 import org.apache.samza.sql.runner.SamzaSqlApplicationContext;
@@ -50,7 +55,7 @@
  * Translator to translate the Project node in the relational graph to the corresponding StreamGraph
  * implementation.
  */
-class ProjectTranslator {
+public class ProjectTranslator {
 
   private static final Logger LOG = LoggerFactory.getLogger(ProjectTranslator.class);
   //private transient int messageIndex = 0;
@@ -61,6 +66,122 @@
   }
 
   /**
+   * Converts the resulting row from the Calcite expression evaluator to a SamzaSqlRelRecord to be sent downstream.
+   *
+   * @param objects input objects to be converted
+   * @param rowType Calcite row type of the resulting row
+   * @return a valid {@link SamzaSqlRelRecord} built from the input objects
+   */
+  public static SamzaSqlRelRecord buildSamzaRelRecord(Object[] objects, RelDataType rowType) {
+    Preconditions.checkNotNull(objects, "Input objects can not be null");
+    Preconditions.checkState(rowType.isStruct(), "Row Type has to be a Struct and got " + rowType.getSqlTypeName());
+    Preconditions.checkState(objects.length == rowType.getFieldCount(),
+        "Objects counts and type counts must match " + objects.length + " vs " + rowType.getFieldCount());
+    List<String> names = new ArrayList<>(rowType.getFieldNames());
+    List<Object> values = new ArrayList<>(rowType.getFieldCount());
+    for (int i = 0; i < objects.length; i++) {
+      Object val = objects[i];
+      if (val == null) {
+        values.add(null);
+        continue;
+      }
+      final RelDataType valueType = rowType.getFieldList().get(i).getType();
+      values.add(convertToSamzaSqlType(val, valueType));
+    }
+    return new SamzaSqlRelRecord(names, values);
+  }
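+  // Illustrative example: for a row type (id INTEGER, address ROW(city VARCHAR)), passing
+  // new Object[]{1, new Object[]{"LA"}} yields a SamzaSqlRelRecord(["id", "address"], [1, record(["city"], ["LA"])]),
+  // i.e. nested ROW values become nested SamzaSqlRelRecords.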
+
+  /**
+   * Recursively converts a primitive Java object to a valid SamzaSqlRelRecord field type.
+   *
+   * @param value input value to be converted
+   * @param dataType value type as derived by Calcite
+   * @return a SamzaSqlRelRecord or a primitive SamzaSqlRelRecord field value.
+   */
+  private static Object convertToSamzaSqlType(Object value, RelDataType dataType) {
+    if (value == null) {
+      return null;
+    }
+    switch (dataType.getSqlTypeName()) {
+      case ROW:
+        List<String> names = new ArrayList<>(dataType.getFieldNames());
+        // A row struct is represented as an Object array in Calcite.
+        Object[] row = (Object[]) value;
+        List<Object> values = new ArrayList<>(row.length);
+        for (int i = 0; i < row.length; i++) {
+          values.add(convertToSamzaSqlType(row[i], dataType.getFieldList().get(i).getType()));
+        }
+        return new SamzaSqlRelRecord(names, values);
+      case MAP:
+        Map<Object, Object> objectMap = (Map<Object, Object>) value;
+        Map<Object, Object> resultMap = new HashMap<>();
+        final RelDataType valuesType = dataType.getValueType();
+        objectMap.forEach((key, v) -> resultMap.put(key, convertToSamzaSqlType(v, valuesType)));
+        return resultMap;
+      case ARRAY:
+        List<Object> objectList = (List<Object>) value;
+        final RelDataType elementsType = dataType.getComponentType();
+        return objectList.stream().map(e -> convertToSamzaSqlType(e, elementsType)).collect(Collectors.toList());
+      case BOOLEAN:
+      case BIGINT:
+      case BINARY:
+      case INTEGER:
+      case TINYINT:
+      case DOUBLE:
+      case FLOAT:
+      case REAL:
+      case VARCHAR:
+      case CHAR:
+      case VARBINARY:
+      case ANY:
+      case OTHER:
+        // Today we treat these as type ANY or OTHER, which is not ideal.
+        // This will change when adding timestamp support or more complex non-Java-primitive types.
+        // TODO: ideally we would add a type factory that can do the conversion between Calcite and Samza.
+        return value;
+      default:
+        // Unsupported SQL type: fail fast instead of silently passing the value through.
+        throw new IllegalStateException("Unknown SQL type " + dataType.getSqlTypeName());
+    }
+  }
+
+  /**
+   * Converts the Samza record to a Java primitive row format that follows the convention of Calcite's enumerable operators.
+   *
+   * @param samzaSqlRelRecord input record.
+   * @return a row of Java primitives conforming to org.apache.calcite.adapter.enumerable.JavaRowFormat#ARRAY
+   */
+  public static Object[] convertToJavaRow(SamzaSqlRelRecord samzaSqlRelRecord) {
+    if (samzaSqlRelRecord == null) {
+      return null;
+    }
+    Object[] inputRow = new Object[samzaSqlRelRecord.getFieldValues().size()];
+    for (int i = 0; i < inputRow.length; i++) {
+      inputRow[i] = asPrimitiveJavaRow(samzaSqlRelRecord.getFieldValues().get(i));
+    }
+    return inputRow;
+  }
+
+  private static Object asPrimitiveJavaRow(Object inputObject) {
+    if (inputObject == null) {
+      return null;
+    }
+    if (inputObject instanceof SamzaSqlRelRecord) {
+      return convertToJavaRow((SamzaSqlRelRecord) inputObject);
+    }
+    if (inputObject instanceof List) {
+      return ((List) inputObject).stream().map(e -> asPrimitiveJavaRow(e)).collect(Collectors.toList());
+    }
+    if (inputObject instanceof Map) {
+      Map<Object, Object> objectMap = new HashMap<>();
+      ((Map<Object, Object>) inputObject).forEach((k, v) -> objectMap.put(k, asPrimitiveJavaRow(v)));
+      return objectMap;
+    }
+    return inputObject;
+  }
+
+  /**
    * ProjectMapFunction implements MapFunction to map input SamzaSqlRelMessages, one at a time, to a new
    * SamzaSqlRelMessage which consists of the projected fields
    */
@@ -94,6 +215,7 @@
       this.translatorContext =
           ((SamzaSqlApplicationContext) context.getApplicationTaskContext()).getTranslatorContexts().get(queryId);
       this.project = (Project) this.translatorContext.getRelNode(projectId);
+      LOG.info("Compiling operator {}", project.getDigest());
       this.expr = this.translatorContext.getExpressionCompiler().compile(project.getInputs(), project.getProjects());
       ContainerContext containerContext = context.getContainerContext();
       metricsRegistry = containerContext.getContainerMetricsRegistry();
@@ -114,20 +236,19 @@
       long arrivalTime = System.nanoTime();
       RelDataType type = project.getRowType();
       Object[] output = new Object[type.getFieldCount()];
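+      // Flatten the incoming rel record into the primitive Object[] row format before invoking the
+      // compiled Calcite expression; the expression writes its projected values into 'output'.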
+      Object[] inputRow = convertToJavaRow(message.getSamzaSqlRelRecord());
+      SamzaSqlExecutionContext execContext = translatorContext.getExecutionContext();
+      DataContext dataRootContext = translatorContext.getDataContext();
       try {
-        expr.execute(translatorContext.getExecutionContext(), context, translatorContext.getDataContext(),
-            message.getSamzaSqlRelRecord().getFieldValues().toArray(), output);
+        expr.execute(execContext, context, dataRootContext, inputRow, output);
       } catch (Exception e) {
         String errMsg = String.format("Handling the following rel message ran into an error. %s", message);
         LOG.error(errMsg, e);
         throw new SamzaException(errMsg, e);
       }
-      List<String> names = new ArrayList<>();
-      for (int index = 0; index < output.length; index++) {
-        names.add(index, project.getNamedProjects().get(index).getValue());
-      }
+      SamzaSqlRelRecord record = buildSamzaRelRecord(output, project.getRowType());
       updateMetrics(arrivalTime, System.nanoTime(), message.getSamzaSqlRelMsgMetadata().isNewInputMessage);
-      return new SamzaSqlRelMessage(names, Arrays.asList(output), message.getSamzaSqlRelMsgMetadata());
+      return new SamzaSqlRelMessage(record, message.getSamzaSqlRelMsgMetadata());
     }
 
     /**
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/translator/QueryTranslator.java b/samza-sql/src/main/java/org/apache/samza/sql/translator/QueryTranslator.java
index fc16326..405fb2c 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/translator/QueryTranslator.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/translator/QueryTranslator.java
@@ -32,7 +32,7 @@
 import org.apache.calcite.rel.logical.LogicalFilter;
 import org.apache.calcite.rel.logical.LogicalJoin;
 import org.apache.calcite.rel.logical.LogicalProject;
-import org.apache.commons.lang.Validate;
+import org.apache.commons.lang3.Validate;
 import org.apache.samza.SamzaException;
 import org.apache.samza.application.descriptors.StreamApplicationDescriptor;
 import org.apache.samza.context.ApplicationContainerContext;
@@ -192,7 +192,7 @@
   void translate(SamzaSqlQueryParser.QueryInfo queryInfo, StreamApplicationDescriptor appDesc, int queryId) {
     QueryPlanner planner =
         new QueryPlanner(sqlConfig.getRelSchemaProviders(), sqlConfig.getInputSystemStreamConfigBySource(),
-            sqlConfig.getUdfMetadata());
+            sqlConfig.getUdfMetadata(), sqlConfig.isQueryPlanOptimizerEnabled());
     final RelRoot relRoot = planner.plan(queryInfo.getSelectQuery());
     SamzaSqlExecutionContext executionContext = new SamzaSqlExecutionContext(sqlConfig);
     TranslatorContext translatorContext = new TranslatorContext(appDesc, relRoot, executionContext);
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/translator/SamzaSqlRemoteTableJoinFunction.java b/samza-sql/src/main/java/org/apache/samza/sql/translator/SamzaSqlRemoteTableJoinFunction.java
index 6a93b2d..8a60502 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/translator/SamzaSqlRemoteTableJoinFunction.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/translator/SamzaSqlRemoteTableJoinFunction.java
@@ -21,6 +21,8 @@
 
 import java.util.List;
 import java.util.Objects;
+import java.util.function.Function;
+import javax.annotation.Nullable;
 import org.apache.calcite.rel.core.JoinRelType;
 import org.apache.samza.context.Context;
 import org.apache.samza.operators.KV;
@@ -42,15 +44,35 @@
   private transient SamzaRelTableKeyConverter relTableKeyConverter;
   private final String tableName;
   private final int queryId;
+  /**
+   * Projection and filter function applied after the join lookup. The function returns null when the filter rejects the row.
+   */
+  private Function<SamzaSqlRelMessage, SamzaSqlRelMessage> projectFunction;
+  /**
+   * Queue of project and filter operators collected from the table scan.
+   */
+  private final MessageStreamCollector messageStreamCollector;
 
-  SamzaSqlRemoteTableJoinFunction(SamzaRelConverter msgConverter, SamzaRelTableKeyConverter tableKeyConverter,
-      JoinInputNode streamNode, JoinInputNode tableNode, JoinRelType joinRelType, int queryId) {
+  public SamzaSqlRemoteTableJoinFunction(SamzaRelConverter msgConverter, SamzaRelTableKeyConverter tableKeyConverter,
+      JoinInputNode streamNode, JoinInputNode tableNode, JoinRelType joinRelType, int queryId,
+      MessageStreamCollector messageStreamCollector) {
     super(streamNode, tableNode, joinRelType);
-
     this.msgConverter = msgConverter;
     this.relTableKeyConverter = tableKeyConverter;
     this.tableName = tableNode.getSourceName();
     this.queryId = queryId;
+    this.messageStreamCollector = messageStreamCollector;
+  }
+
+  SamzaSqlRemoteTableJoinFunction(SamzaRelConverter msgConverter, SamzaRelTableKeyConverter tableKeyConverter,
+      JoinInputNode streamNode, JoinInputNode tableNode, JoinRelType joinRelType, int queryId) {
+    super(streamNode, tableNode, joinRelType);
+    this.msgConverter = msgConverter;
+    this.relTableKeyConverter = tableKeyConverter;
+    this.tableName = tableNode.getSourceName();
+    this.queryId = queryId;
+    this.projectFunction = Function.identity();
+    this.messageStreamCollector = null;
   }
 
   @Override
@@ -59,13 +81,24 @@
         ((SamzaSqlApplicationContext) context.getApplicationTaskContext()).getTranslatorContexts().get(queryId);
     this.msgConverter = translatorContext.getMsgConverter(tableName);
     this.relTableKeyConverter = translatorContext.getTableKeyConverter(tableName);
+    if (messageStreamCollector != null) {
+      projectFunction = messageStreamCollector.getFunction(context);
+    }
   }
 
+  /**
+   * Computes the projection and filter after the join lookup.
+   *
+   * @param record input record resulting from the table lookup
+   * @return the projected row, or {@code null} if the row does not pass the filter condition.
+   */
   @Override
+  @Nullable
   protected List<Object> getTableRelRecordFieldValues(KV record) {
     // Using the message rel converter, convert message to sql rel message and add to output values.
-    SamzaSqlRelMessage relMessage = msgConverter.convertToRelMessage(record);
-    return relMessage.getSamzaSqlRelRecord().getFieldValues();
+    final SamzaSqlRelMessage relMessage = msgConverter.convertToRelMessage(record);
+    final SamzaSqlRelMessage result = projectFunction.apply(relMessage);
+    return result == null ? null : result.getSamzaSqlRelRecord().getFieldValues();
   }
 
   @Override
@@ -76,6 +109,8 @@
       return null;
     }
     // Using the table key converter, convert message key from rel format to the record key format.
+    // TODO: One way to avoid the Object type here is to ensure that
+    // the table's key is a SamzaSqlRelRecord or another well-defined type when defining the table descriptor
     return relTableKeyConverter.convertToTableKeyFormat(keyRecord);
   }
 
@@ -83,4 +118,12 @@
   public Object getRecordKey(KV record) {
     return record.getKey();
   }
+
+  @Override
+  public void close() {
+    super.close();
+    if (messageStreamCollector != null) {
+      messageStreamCollector.close();
+    }
+  }
 }
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/translator/SamzaSqlTableJoinFunction.java b/samza-sql/src/main/java/org/apache/samza/sql/translator/SamzaSqlTableJoinFunction.java
index 0715af3..e8fa451 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/translator/SamzaSqlTableJoinFunction.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/translator/SamzaSqlTableJoinFunction.java
@@ -21,8 +21,9 @@
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.stream.Collectors;
 import org.apache.calcite.rel.core.JoinRelType;
-import org.apache.commons.lang.Validate;
+import org.apache.commons.lang3.Validate;
 import org.apache.samza.operators.functions.StreamTableJoinFunction;
 import org.apache.samza.sql.SamzaSqlRelRecord;
 import org.apache.samza.sql.data.SamzaSqlRelMessage;
@@ -48,6 +49,7 @@
   // Table field names are used in the outer join when the table record is not found.
   private final ArrayList<String> tableFieldNames;
   private final ArrayList<String> outFieldNames;
+  private final List<Object> nullRow;
 
   SamzaSqlTableJoinFunction(JoinInputNode streamNode, JoinInputNode tableNode, JoinRelType joinRelType) {
     this.joinRelType = joinRelType;
@@ -69,6 +71,7 @@
       outFieldNames.addAll(tableFieldNames);
       outFieldNames.addAll(streamNode.getFieldNames());
     }
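+    // Pre-compute a row of nulls matching the table field names; it is used to pad the output when a
+    // post-lookup filter rejects the table row in an outer join (see getTableRelRecordFieldValues).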
+    nullRow = tableFieldNames.stream().map(x -> null).collect(Collectors.toList());
   }
 
   @Override
@@ -93,7 +96,10 @@
 
     // Add the table record fields.
     if (record != null) {
-      outFieldValues.addAll(getTableRelRecordFieldValues(record));
+      List<Object> row = getTableRelRecordFieldValues(record);
+      // Row is null when the post-lookup filter did not match: drop the message for an inner join,
+      // or pad with nulls for an outer join.
+      if (row == null && joinRelType == JoinRelType.INNER) {
+        return null;
+      }
+      outFieldValues.addAll(row == null ? nullRow : row);
     } else {
       // Table record could be null as the record could not be found in the store. This can
       // happen for outer joins. Add nulls to all the field values in the output message.
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/translator/ScanTranslator.java b/samza-sql/src/main/java/org/apache/samza/sql/translator/ScanTranslator.java
index aefa04b..87c4e00 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/translator/ScanTranslator.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/translator/ScanTranslator.java
@@ -22,7 +22,7 @@
 import java.util.List;
 import java.util.Map;
 import org.apache.calcite.rel.core.TableScan;
-import org.apache.commons.lang.Validate;
+import org.apache.commons.lang3.Validate;
 import org.apache.samza.SamzaException;
 import org.apache.samza.application.descriptors.StreamApplicationDescriptor;
 import org.apache.samza.context.ContainerContext;
@@ -37,7 +37,6 @@
 import org.apache.samza.sql.SamzaSqlInputMessage;
 import org.apache.samza.sql.SamzaSqlInputTransformer;
 import org.apache.samza.sql.data.SamzaSqlRelMessage;
-import org.apache.samza.sql.data.SamzaSqlRelMsgMetadata;
 import org.apache.samza.sql.interfaces.SamzaRelConverter;
 import org.apache.samza.sql.interfaces.SqlIOConfig;
 import org.apache.samza.sql.runner.SamzaSqlApplicationContext;
@@ -171,7 +170,10 @@
     // SqlIOResolverFactory.
     // For local table, even though table descriptor is already defined, we still need to create the input stream
     // descriptor to load the local table.
+    // To handle the case where a project or filter is pushed to a remote table, the scan collects the operators and feeds them to the join operator.
+    // TODO: Ideally this should be changed to use Calcite pattern matching to translate the plan.
     if (isRemoteTable) {
+      context.registerMessageStream(tableScan.getId(), new MessageStreamCollector());
       return;
     }
 
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/translator/TranslatorContext.java b/samza-sql/src/main/java/org/apache/samza/sql/translator/TranslatorContext.java
index 5990897..7d85652 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/translator/TranslatorContext.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/translator/TranslatorContext.java
@@ -34,6 +34,7 @@
 import org.apache.calcite.schema.SchemaPlus;
 import org.apache.samza.application.descriptors.StreamApplicationDescriptor;
 import org.apache.samza.operators.MessageStream;
+import org.apache.samza.sql.data.SamzaSqlRelMessage;
 import org.apache.samza.sql.interfaces.SamzaRelTableKeyConverter;
 import org.apache.samza.system.descriptors.DelegatingSystemDescriptor;
 import org.apache.samza.sql.data.RexToJavaCompiler;
@@ -52,7 +53,7 @@
   private final RexToJavaCompiler compiler;
   private final Map<String, SamzaRelConverter> relSamzaConverters;
   private final Map<String, SamzaRelTableKeyConverter> relTableKeyConverters;
-  private final Map<Integer, MessageStream> messageStreams;
+  private final Map<Integer, MessageStream<SamzaSqlRelMessage>> messageStreams;
   private final Map<Integer, RelNode> relNodes;
   private final Map<String, DelegatingSystemDescriptor> systemDescriptors;
 
@@ -199,7 +200,7 @@
    * @param id the id
    * @return the message stream
    */
-  MessageStream getMessageStream(int id) {
+  MessageStream<SamzaSqlRelMessage> getMessageStream(int id) {
     return messageStreams.get(id);
   }
 
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/translator/TranslatorInputMetricsMapFunction.java b/samza-sql/src/main/java/org/apache/samza/sql/translator/TranslatorInputMetricsMapFunction.java
index ef3028e..5cf9203 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/translator/TranslatorInputMetricsMapFunction.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/translator/TranslatorInputMetricsMapFunction.java
@@ -37,7 +37,9 @@
 
   private final String logicalOpId;
 
-  TranslatorInputMetricsMapFunction(String logicalOpId) { this.logicalOpId = logicalOpId; }
+  TranslatorInputMetricsMapFunction(String logicalOpId) {
+    this.logicalOpId = logicalOpId;
+  }
 
   /**
    * initializes the TranslatorOutputMetricsMapFunction before any message is processed
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/translator/TranslatorOutputMetricsMapFunction.java b/samza-sql/src/main/java/org/apache/samza/sql/translator/TranslatorOutputMetricsMapFunction.java
index 3e85bed..7cb0e5d 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/translator/TranslatorOutputMetricsMapFunction.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/translator/TranslatorOutputMetricsMapFunction.java
@@ -40,7 +40,9 @@
 
   private final String logicalOpId;
 
-  TranslatorOutputMetricsMapFunction(String logicalOpId) { this.logicalOpId = logicalOpId; }
+  TranslatorOutputMetricsMapFunction(String logicalOpId) {
+    this.logicalOpId = logicalOpId;
+  }
 
   /**
    * initializes the TranslatorOutputMetricsMapFunction before any message is processed
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/udf/GetNestedField.java b/samza-sql/src/main/java/org/apache/samza/sql/udf/GetNestedField.java
new file mode 100644
index 0000000..87cf486
--- /dev/null
+++ b/samza-sql/src/main/java/org/apache/samza/sql/udf/GetNestedField.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.samza.sql.udf;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import java.lang.reflect.Type;
+import java.util.Arrays;
+import java.util.List;
+import org.apache.calcite.adapter.enumerable.CallImplementor;
+import org.apache.calcite.adapter.enumerable.EnumUtils;
+import org.apache.calcite.adapter.enumerable.NullPolicy;
+import org.apache.calcite.adapter.enumerable.RexImpTable;
+import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
+import org.apache.calcite.linq4j.tree.ConstantExpression;
+import org.apache.calcite.linq4j.tree.Expression;
+import org.apache.calcite.linq4j.tree.ExpressionType;
+import org.apache.calcite.linq4j.tree.Expressions;
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.schema.Function;
+import org.apache.calcite.schema.FunctionParameter;
+import org.apache.calcite.schema.ImplementableFunction;
+import org.apache.calcite.schema.ScalarFunction;
+import org.apache.calcite.sql.SqlCallBinding;
+import org.apache.calcite.sql.SqlFunction;
+import org.apache.calcite.sql.SqlIdentifier;
+import org.apache.calcite.sql.SqlNode;
+import org.apache.calcite.sql.SqlOperandCountRange;
+import org.apache.calcite.sql.SqlOperatorBinding;
+import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.type.OperandTypes;
+import org.apache.calcite.sql.type.SqlOperandCountRanges;
+import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.sql.validate.SqlUserDefinedFunction;
+
+import static org.apache.calcite.schema.impl.ReflectiveFunctionBase.builder;
+
+
+/**
+ * Operator to extract nested rows or fields from a struct row type using a dotted path.
+ * The goal of this operator is two-fold.
+ * First, it is a temporary fix for https://issues.apache.org/jira/browse/CALCITE-4065 to extract a row from a row.
+ * Second, it enables a smooth, backward-compatible migration from existing UDFs that rely on the legacy row format.
+ */
+public class GetNestedField extends SqlUserDefinedFunction {
+
+  public static final SqlFunction INSTANCE = new GetNestedField(new ExtractFunction());
+
+  public GetNestedField(Function function) {
+    super(new SqlIdentifier("GetNestedField", SqlParserPos.ZERO), null, null, null, ImmutableList.of(), function);
+  }
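+
+  // Illustrative usage (hypothetical stream and field names, not part of this change):
+  //   SELECT GetNestedField(profile, 'address.zip') FROM testavro.PAGEVIEW
+  // walks the dotted path through nested ROW types and returns the leaf field's value.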
+
+  @Override
+  public SqlOperandCountRange getOperandCountRange() {
+    return SqlOperandCountRanges.of(2);
+  }
+
+  @Override
+  public boolean checkOperandTypes(SqlCallBinding callBinding, boolean throwOnFailure) {
+    final SqlNode left = callBinding.operand(0);
+    final SqlNode right = callBinding.operand(1);
+    final RelDataType type = callBinding.getValidator().deriveType(callBinding.getScope(), left);
+    boolean isRow = true;
+    if (type.getSqlTypeName() != SqlTypeName.ROW) {
+      isRow = false;
+    } else if (type.getSqlIdentifier().isStar()) {
+      isRow = false;
+    }
+    if (!isRow && throwOnFailure) {
+      throw callBinding.newValidationSignatureError();
+    }
+    return isRow && OperandTypes.STRING.checkSingleOperandType(callBinding, right, 0, throwOnFailure);
+  }
+
+  @Override
+  public RelDataType inferReturnType(SqlOperatorBinding opBinding) {
+    final RelDataTypeFactory typeFactory = opBinding.getTypeFactory();
+    final RelDataType recordType = opBinding.getOperandType(0);
+    switch (recordType.getSqlTypeName()) {
+      case ROW:
+        final String fieldName = opBinding.getOperandLiteralValue(1, String.class);
+        String[] fieldNameChain = fieldName.split("\\.");
+        RelDataType relDataType = opBinding.getOperandType(0);
+        for (int i = 0; i < fieldNameChain.length; i++) {
+          RelDataTypeField t = relDataType.getField(fieldNameChain[i], true, true);
+          Preconditions.checkNotNull(t,
+              "Can not find " + fieldNameChain[i] + " within record " + recordType.toString() + " Original String "
+                  + Arrays.toString(fieldNameChain) + " Original row " + recordType.toString());
+          relDataType = t.getType();
+        }
+        if (recordType.isNullable()) {
+          return typeFactory.createTypeWithNullability(relDataType, true);
+        } else {
+          return relDataType;
+        }
+      default:
+        throw new AssertionError("First operand is supposed to be a ROW struct");
+    }
+  }
+
+  private static class ExtractFunction implements ScalarFunction, ImplementableFunction {
+    private final JavaTypeFactoryImpl javaTypeFactory = new JavaTypeFactoryImpl();
+
+    @Override
+    public CallImplementor getImplementor() {
+      return RexImpTable.createImplementor((translator, call, translatedOperands) -> {
+        Preconditions.checkState(translatedOperands.size() == 2 && call.operands.size() == 2,
+            "Expected 2 operands found " + Math.min(translatedOperands.size(), call.getOperands().size()));
+        Expression op0 = translatedOperands.get(0);
+        Expression op1 = translatedOperands.get(1);
+        Preconditions.checkState(op1.getNodeType().equals(ExpressionType.Constant),
+            "Operand 2 has to be constant and got " + op1.getNodeType());
+        Preconditions.checkState(op1.type.equals(String.class), "Operand 2 has to be String and got " + op1.type);
+        final String fieldName = (String) ((ConstantExpression) op1).value;
+        String[] fieldNameChain = fieldName.split("\\.");
+        RelDataType relDataType = call.operands.get(0).getType();
+        Preconditions.checkState(relDataType.getSqlTypeName().equals(SqlTypeName.ROW),
+            "Expected first operand to be ROW found " + relDataType.toString());
+        Expression currentExpression = op0;
+        for (int i = 0; i < fieldNameChain.length; i++) {
+          Preconditions.checkState(relDataType.getSqlTypeName() == SqlTypeName.ROW,
+              "Must be ROW found " + relDataType.toString());
+          RelDataTypeField t = relDataType.getField(fieldNameChain[i], true, true);
+          Preconditions.checkNotNull(t,
+              "Notfound " + fieldNameChain[i] + " in the following struct " + relDataType.toString()
+                  + " Original String " + Arrays.toString(fieldNameChain) + " Original row " + call.operands.get(0)
+                  .getType());
+          currentExpression = Expressions.arrayIndex(Expressions.convert_(currentExpression, Object[].class),
+              Expressions.constant(t.getIndex()));
+          relDataType = t.getType();
+        }
+        Type fieldType = javaTypeFactory.getJavaClass(relDataType);
+        return EnumUtils.convert(currentExpression, fieldType);
+      }, NullPolicy.ARG0, false);
+    }
+
+    @Override
+    public RelDataType getReturnType(RelDataTypeFactory typeFactory) {
+      throw new IllegalStateException("should not be called");
+    }
+
+    @Override
+    public List<FunctionParameter> getParameters() {
+      return builder().add(Object[].class, "row").add(String.class, "path").build();
+    }
+  }
+
+  @Override
+  public String getAllowedSignatures(String opNameToUse) {
+    return opNameToUse + "(<ROW>, <VARCHAR>)";
+  }
+}
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/util/JsonUtil.java b/samza-sql/src/main/java/org/apache/samza/sql/util/JsonUtil.java
index afd6490..47c761a 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/util/JsonUtil.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/util/JsonUtil.java
@@ -22,7 +22,7 @@
 import java.io.IOException;
 import java.io.StringWriter;
 
-import org.apache.commons.lang.Validate;
+import org.apache.commons.lang3.Validate;
 import org.apache.samza.SamzaException;
 import org.codehaus.jackson.map.DeserializationConfig;
 import org.codehaus.jackson.map.ObjectMapper;
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/util/SamzaSqlQueryParser.java b/samza-sql/src/main/java/org/apache/samza/sql/util/SamzaSqlQueryParser.java
index b6fb76f..4a6390b 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/util/SamzaSqlQueryParser.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/util/SamzaSqlQueryParser.java
@@ -115,7 +115,7 @@
     String selectQuery;
     ArrayList<String> sources;
     if (sqlNode instanceof SqlInsert) {
-      SqlInsert sqlInsert = ((SqlInsert) sqlNode);
+      SqlInsert sqlInsert = (SqlInsert) sqlNode;
       sink = sqlInsert.getTargetTable().toString();
       if (sqlInsert.getSource() instanceof SqlSelect) {
         SqlSelect sqlSelect = (SqlSelect) sqlInsert.getSource();
@@ -165,6 +165,7 @@
         .traitDefs(traitDefs)
         .context(Contexts.EMPTY_CONTEXT)
         .costFactory(null)
+        //.programs(Programs.CALC_PROGRAM)
         .build();
     return Frameworks.getPlanner(frameworkConfig);
   }
@@ -192,7 +193,7 @@
     } else if (node instanceof SqlIdentifier) {
       sourceList.add(node.toString());
     } else if (node instanceof SqlBasicCall) {
-      SqlBasicCall basicCall = ((SqlBasicCall) node);
+      SqlBasicCall basicCall = (SqlBasicCall) node;
       if (basicCall.getOperator() instanceof SqlAsOperator) {
         getSource(basicCall.operand(0), sourceList);
       } else if (basicCall.getOperator() instanceof SqlUnnestOperator && basicCall.operand(0) instanceof SqlSelect) {
diff --git a/samza-sql/src/main/java/org/apache/samza/sql/util/SqlFileParser.java b/samza-sql/src/main/java/org/apache/samza/sql/util/SqlFileParser.java
index f996987..d68eba1 100644
--- a/samza-sql/src/main/java/org/apache/samza/sql/util/SqlFileParser.java
+++ b/samza-sql/src/main/java/org/apache/samza/sql/util/SqlFileParser.java
@@ -26,8 +26,8 @@
 import java.util.List;
 import java.util.stream.Collectors;
 
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.lang.Validate;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.Validate;
 import org.apache.samza.SamzaException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
diff --git a/samza-sql/src/test/java/org/apache/samza/sql/avro/TestAvroRelConversion.java b/samza-sql/src/test/java/org/apache/samza/sql/avro/TestAvroRelConversion.java
index 102ad52..dccdd7d 100644
--- a/samza-sql/src/test/java/org/apache/samza/sql/avro/TestAvroRelConversion.java
+++ b/samza-sql/src/test/java/org/apache/samza/sql/avro/TestAvroRelConversion.java
@@ -50,6 +50,7 @@
 import org.apache.samza.operators.KV;
 import org.apache.samza.sql.avro.schemas.AddressRecord;
 import org.apache.samza.sql.avro.schemas.ComplexRecord;
+import org.apache.samza.sql.avro.schemas.ComplexUnion;
 import org.apache.samza.sql.avro.schemas.Kind;
 import org.apache.samza.sql.avro.schemas.MyFixed;
 import org.apache.samza.sql.avro.schemas.PhoneNumber;
@@ -71,14 +72,16 @@
 
   private static final Logger LOG = LoggerFactory.getLogger(TestAvroRelConversion.class);
   private static final byte[] DEFAULT_TRACKING_ID_BYTES =
-      {76, 75, -24, 10, 33, -117, 24, -52, -110, -39, -5, 102, 65, 57, -62, -1};
+    {76, 75, -24, 10, 33, -117, 24, -52, -110, -39, -5, 102, 65, 57, -62, -1};
 
   private final AvroRelConverter simpleRecordAvroRelConverter;
   private final AvroRelConverter complexRecordAvroRelConverter;
   private final AvroRelConverter nestedRecordAvroRelConverter;
+  private final AvroRelConverter complexUnionAvroRelConverter;
   private final AvroRelSchemaProvider simpleRecordSchemaProvider;
   private final AvroRelSchemaProvider complexRecordSchemaProvider;
   private final AvroRelSchemaProvider nestedRecordSchemaProvider;
+  private final AvroRelSchemaProvider complexUnionSchemaProvider;
 
   private int id = 1;
   private boolean boolValue = true;
@@ -89,11 +92,11 @@
   private MyFixed fixedBytes = new MyFixed();
   private long longValue = 200L;
 
-  private HashMap<String, String> mapValue = new HashMap<String, String>() {{
-    put("key1", "val1");
-    put("key2", "val2");
-    put("key3", "val3");
-  }};
+  private HashMap<String, String> mapValue = new HashMap<String, String>() { {
+      put("key1", "val1");
+      put("key2", "val2");
+      put("key3", "val3");
+    } };
   private List<String> arrayValue = Arrays.asList("val1", "val2", "val3");
   RelSchemaConverter relSchemaConverter = new RelSchemaConverter();
 
@@ -102,6 +105,7 @@
     SystemStream ss1 = new SystemStream("test", "complexRecord");
     SystemStream ss2 = new SystemStream("test", "simpleRecord");
     SystemStream ss3 = new SystemStream("test", "nestedRecord");
+    SystemStream ss4 = new SystemStream("test", "complexUnion");
     props.put(
         String.format(ConfigBasedAvroRelSchemaProviderFactory.CFG_SOURCE_SCHEMA, ss1.getSystem(), ss1.getStream()),
         ComplexRecord.SCHEMA$.toString());
@@ -111,15 +115,20 @@
     props.put(
         String.format(ConfigBasedAvroRelSchemaProviderFactory.CFG_SOURCE_SCHEMA, ss3.getSystem(), ss3.getStream()),
         Profile.SCHEMA$.toString());
+    props.put(
+        String.format(ConfigBasedAvroRelSchemaProviderFactory.CFG_SOURCE_SCHEMA, ss4.getSystem(), ss4.getStream()),
+        ComplexUnion.SCHEMA$.toString());
 
     ConfigBasedAvroRelSchemaProviderFactory factory = new ConfigBasedAvroRelSchemaProviderFactory();
 
     complexRecordSchemaProvider = (AvroRelSchemaProvider) factory.create(ss1, new MapConfig(props));
     simpleRecordSchemaProvider = (AvroRelSchemaProvider) factory.create(ss2, new MapConfig(props));
     nestedRecordSchemaProvider = (AvroRelSchemaProvider) factory.create(ss3, new MapConfig(props));
+    complexUnionSchemaProvider = (AvroRelSchemaProvider) factory.create(ss4, new MapConfig(props));
     complexRecordAvroRelConverter = new AvroRelConverter(ss1, complexRecordSchemaProvider, new MapConfig());
     simpleRecordAvroRelConverter = new AvroRelConverter(ss2, simpleRecordSchemaProvider, new MapConfig());
     nestedRecordAvroRelConverter = new AvroRelConverter(ss3, nestedRecordSchemaProvider, new MapConfig());
+    complexUnionAvroRelConverter = new AvroRelConverter(ss4, complexUnionSchemaProvider, new MapConfig());
 
     fixedBytes.bytes(DEFAULT_TRACKING_ID_BYTES);
   }
@@ -232,6 +241,42 @@
   }
 
   @Test
+  public void testComplexUnionConversionShouldWorkWithBothStringAndIntTypes() throws Exception {
+    // ComplexUnion is an Avro record with a non-nullable union field that allows both String and Integer types.
+    // First, test the complex-union conversion for the String type.
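+    // Note: Avro may decode the string branch of the union as a Utf8 instance rather than a
+    // java.lang.String (an assumption about Avro's generic decoding), hence the toString() comparisons below.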
+    GenericData.Record record = new GenericData.Record(ComplexUnion.SCHEMA$);
+    record.put("non_nullable_union_value", testStrValue);
+
+    ComplexUnion complexUnion = new ComplexUnion();
+    complexUnion.non_nullable_union_value = testStrValue;
+
+    byte[] serializedData = bytesFromGenericRecord(record);
+    GenericRecord genericRecord = genericRecordFromBytes(serializedData, ComplexUnion.SCHEMA$);
+    SamzaSqlRelMessage message = complexUnionAvroRelConverter.convertToRelMessage(new KV<>("key", genericRecord));
+
+    Assert.assertEquals(testStrValue, message.getSamzaSqlRelRecord().getField("non_nullable_union_value").get().toString());
+
+    serializedData = encodeAvroSpecificRecord(ComplexUnion.class, complexUnion);
+    genericRecord = genericRecordFromBytes(serializedData, ComplexUnion.SCHEMA$);
+    Assert.assertEquals(testStrValue, genericRecord.get("non_nullable_union_value").toString());
+
+    // Then test the complex-union conversion for the Integer type.
+    record.put("non_nullable_union_value", Integer.valueOf(123));
+
+    complexUnion.non_nullable_union_value = Integer.valueOf(123);
+
+    serializedData = bytesFromGenericRecord(record);
+    genericRecord = genericRecordFromBytes(serializedData, ComplexUnion.SCHEMA$);
+    message = complexUnionAvroRelConverter.convertToRelMessage(new KV<>("key", genericRecord));
+    Assert.assertEquals(Integer.valueOf(123), message.getSamzaSqlRelRecord().getField("non_nullable_union_value").get());
+
+    serializedData = encodeAvroSpecificRecord(ComplexUnion.class, complexUnion);
+    genericRecord = genericRecordFromBytes(serializedData, ComplexUnion.SCHEMA$);
+    Assert.assertEquals(Integer.valueOf(123), genericRecord.get("non_nullable_union_value"));
+
+  }
+
+  @Test
   public void testNestedRecordConversion() throws IOException {
     GenericData.Record record = new GenericData.Record(Profile.SCHEMA$);
     record.put("id", 1);
diff --git a/samza-sql/src/test/java/org/apache/samza/sql/avro/schemas/ComplexUnion.avsc b/samza-sql/src/test/java/org/apache/samza/sql/avro/schemas/ComplexUnion.avsc
new file mode 100644
index 0000000..d94417b
--- /dev/null
+++ b/samza-sql/src/test/java/org/apache/samza/sql/avro/schemas/ComplexUnion.avsc
@@ -0,0 +1,32 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*/
+
+{
+    "name": "ComplexUnion",
+    "version" : 1,
+    "namespace": "org.apache.samza.sql.avro.schemas",
+    "type": "record",
+    "fields": [
+        {
+           "name": "non_nullable_union_value",
+           "doc": "union Value.",
+           "type": ["int", "string"]
+        }
+    ]
+}
diff --git a/samza-sql/src/test/java/org/apache/samza/sql/avro/schemas/ComplexUnion.java b/samza-sql/src/test/java/org/apache/samza/sql/avro/schemas/ComplexUnion.java
new file mode 100644
index 0000000..dd746e1
--- /dev/null
+++ b/samza-sql/src/test/java/org/apache/samza/sql/avro/schemas/ComplexUnion.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.samza.sql.avro.schemas;
+@SuppressWarnings("all")
+@org.apache.avro.specific.AvroGenerated
+public class ComplexUnion extends org.apache.avro.specific.SpecificRecordBase implements org.apache.avro.specific.SpecificRecord {
+  public static final org.apache.avro.Schema SCHEMA$ = new org.apache.avro.Schema.Parser().parse("{\"type\":\"record\",\"name\":\"ComplexUnion\",\"namespace\":\"org.apache.samza.sql.avro.schemas\",\"fields\":[{\"name\":\"non_nullable_union_value\",\"type\":[\"int\",\"string\"],\"doc\":\"union Value.\"}],\"version\":1}");
+  public static org.apache.avro.Schema getClassSchema() { return SCHEMA$; }
+  /** union Value. */
+  @Deprecated public java.lang.Object non_nullable_union_value;
+
+  /**
+   * Default constructor.  Note that this does not initialize fields
+   * to their default values from the schema.  If that is desired then
+   * one should use <code>newBuilder()</code>.
+   */
+  public ComplexUnion() {}
+
+  /**
+   * All-args constructor.
+   */
+  public ComplexUnion(java.lang.Object non_nullable_union_value) {
+    this.non_nullable_union_value = non_nullable_union_value;
+  }
+
+  public org.apache.avro.Schema getSchema() { return SCHEMA$; }
+  // Used by DatumWriter.  Applications should not call.
+  public java.lang.Object get(int field$) {
+    switch (field$) {
+    case 0: return non_nullable_union_value;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+  // Used by DatumReader.  Applications should not call.
+  @SuppressWarnings(value="unchecked")
+  public void put(int field$, java.lang.Object value$) {
+    switch (field$) {
+    case 0: non_nullable_union_value = (java.lang.Object)value$; break;
+    default: throw new org.apache.avro.AvroRuntimeException("Bad index");
+    }
+  }
+
+  /**
+   * Gets the value of the 'non_nullable_union_value' field.
+   * union Value.   */
+  public java.lang.Object getNonNullableUnionValue() {
+    return non_nullable_union_value;
+  }
+
+  /**
+   * Sets the value of the 'non_nullable_union_value' field.
+   * union Value.   * @param value the value to set.
+   */
+  public void setNonNullableUnionValue(java.lang.Object value) {
+    this.non_nullable_union_value = value;
+  }
+
+  /** Creates a new ComplexUnion RecordBuilder */
+  public static org.apache.samza.sql.avro.schemas.ComplexUnion.Builder newBuilder() {
+    return new org.apache.samza.sql.avro.schemas.ComplexUnion.Builder();
+  }
+
+  /** Creates a new ComplexUnion RecordBuilder by copying an existing Builder */
+  public static org.apache.samza.sql.avro.schemas.ComplexUnion.Builder newBuilder(org.apache.samza.sql.avro.schemas.ComplexUnion.Builder other) {
+    return new org.apache.samza.sql.avro.schemas.ComplexUnion.Builder(other);
+  }
+
+  /** Creates a new ComplexUnion RecordBuilder by copying an existing ComplexUnion instance */
+  public static org.apache.samza.sql.avro.schemas.ComplexUnion.Builder newBuilder(org.apache.samza.sql.avro.schemas.ComplexUnion other) {
+    return new org.apache.samza.sql.avro.schemas.ComplexUnion.Builder(other);
+  }
+
+  /**
+   * RecordBuilder for ComplexUnion instances.
+   */
+  public static class Builder extends org.apache.avro.specific.SpecificRecordBuilderBase<ComplexUnion>
+    implements org.apache.avro.data.RecordBuilder<ComplexUnion> {
+
+    private java.lang.Object non_nullable_union_value;
+
+    /** Creates a new Builder */
+    private Builder() {
+      super(org.apache.samza.sql.avro.schemas.ComplexUnion.SCHEMA$);
+    }
+
+    /** Creates a Builder by copying an existing Builder */
+    private Builder(org.apache.samza.sql.avro.schemas.ComplexUnion.Builder other) {
+      super(other);
+      if (isValidValue(fields()[0], other.non_nullable_union_value)) {
+        this.non_nullable_union_value = data().deepCopy(fields()[0].schema(), other.non_nullable_union_value);
+        fieldSetFlags()[0] = true;
+      }
+    }
+
+    /** Creates a Builder by copying an existing ComplexUnion instance */
+    private Builder(org.apache.samza.sql.avro.schemas.ComplexUnion other) {
+      super(org.apache.samza.sql.avro.schemas.ComplexUnion.SCHEMA$);
+      if (isValidValue(fields()[0], other.non_nullable_union_value)) {
+        this.non_nullable_union_value = data().deepCopy(fields()[0].schema(), other.non_nullable_union_value);
+        fieldSetFlags()[0] = true;
+      }
+    }
+
+    /** Gets the value of the 'non_nullable_union_value' field */
+    public java.lang.Object getNonNullableUnionValue() {
+      return non_nullable_union_value;
+    }
+
+    /** Sets the value of the 'non_nullable_union_value' field */
+    public org.apache.samza.sql.avro.schemas.ComplexUnion.Builder setNonNullableUnionValue(java.lang.Object value) {
+      validate(fields()[0], value);
+      this.non_nullable_union_value = value;
+      fieldSetFlags()[0] = true;
+      return this;
+    }
+
+    /** Checks whether the 'non_nullable_union_value' field has been set */
+    public boolean hasNonNullableUnionValue() {
+      return fieldSetFlags()[0];
+    }
+
+    /** Clears the value of the 'non_nullable_union_value' field */
+    public org.apache.samza.sql.avro.schemas.ComplexUnion.Builder clearNonNullableUnionValue() {
+      non_nullable_union_value = null;
+      fieldSetFlags()[0] = false;
+      return this;
+    }
+
+    @Override
+    public ComplexUnion build() {
+      try {
+        ComplexUnion record = new ComplexUnion();
+        record.non_nullable_union_value = fieldSetFlags()[0] ? this.non_nullable_union_value : (java.lang.Object) defaultValue(fields()[0]);
+        return record;
+      } catch (Exception e) {
+        throw new org.apache.avro.AvroRuntimeException(e);
+      }
+    }
+  }
+}
diff --git a/samza-sql/src/test/java/org/apache/samza/sql/fn/TestBuildOutputRecordUdf.java b/samza-sql/src/test/java/org/apache/samza/sql/fn/TestBuildOutputRecordUdf.java
index be03ca7..9747c65 100644
--- a/samza-sql/src/test/java/org/apache/samza/sql/fn/TestBuildOutputRecordUdf.java
+++ b/samza-sql/src/test/java/org/apache/samza/sql/fn/TestBuildOutputRecordUdf.java
@@ -21,7 +21,6 @@
 
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import org.apache.samza.sql.SamzaSqlRelRecord;
 import org.junit.Assert;
 import org.junit.Test;
diff --git a/samza-sql/src/test/java/org/apache/samza/sql/planner/CheckerTest.java b/samza-sql/src/test/java/org/apache/samza/sql/planner/CheckerTest.java
index 4727775..b9ecfd2 100644
--- a/samza-sql/src/test/java/org/apache/samza/sql/planner/CheckerTest.java
+++ b/samza-sql/src/test/java/org/apache/samza/sql/planner/CheckerTest.java
@@ -55,7 +55,7 @@
     public String execute(String val) {
       return "RandomStringtoFail";
     }
- }
+  }
 
   @Test(expected = SamzaSqlValidatorException.class)
   public void testCheckOperandTypesShouldFailOnTypeMisMatch() throws NoSuchMethodException {
diff --git a/samza-sql/src/test/java/org/apache/samza/sql/planner/TestQueryPlanner.java b/samza-sql/src/test/java/org/apache/samza/sql/planner/TestQueryPlanner.java
new file mode 100644
index 0000000..d234067
--- /dev/null
+++ b/samza-sql/src/test/java/org/apache/samza/sql/planner/TestQueryPlanner.java
@@ -0,0 +1,345 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*/
+
+package org.apache.samza.sql.planner;
+
+import java.util.Collection;
+import java.util.Map;
+import org.apache.calcite.plan.RelOptUtil;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.RelRoot;
+import org.apache.calcite.rel.logical.LogicalFilter;
+import org.apache.calcite.rel.logical.LogicalJoin;
+import org.apache.calcite.rel.logical.LogicalProject;
+import org.apache.calcite.rel.logical.LogicalTableScan;
+import org.apache.calcite.sql.SqlExplainLevel;
+import org.apache.samza.config.Config;
+import org.apache.samza.config.MapConfig;
+import org.apache.samza.sql.dsl.SamzaSqlDslConverterFactory;
+import org.apache.samza.sql.interfaces.DslConverter;
+import org.apache.samza.sql.runner.SamzaSqlApplicationConfig;
+import org.apache.samza.sql.runner.SamzaSqlApplicationRunner;
+import org.apache.samza.sql.util.SamzaSqlTestConfig;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+
+public class TestQueryPlanner {
+
+  @Test
+  public void testTranslate() {
+    Map<String, String> config = SamzaSqlTestConfig.fetchStaticConfigsWithFactories(10);
+    String sql =
+        "Insert into testavro.outputTopic(id) select MyTest(id) from testavro.level1.level2.SIMPLE1 as s where s.id = 10";
+    config.put(SamzaSqlApplicationConfig.CFG_SQL_STMT, sql);
+    Config samzaConfig = SamzaSqlApplicationRunner.computeSamzaConfigs(true, new MapConfig(config));
+
+    DslConverter dslConverter = new SamzaSqlDslConverterFactory().create(samzaConfig);
+    Collection<RelRoot> relRoots = dslConverter.convertDsl(sql);
+    assertEquals(1, relRoots.size());
+  }
+
+  @Test
+  public void testRemoteJoinWithFilter() throws SamzaSqlValidatorException {
+    testRemoteJoinWithFilterHelper(false);
+  }
+
+  @Test
+  public void testRemoteJoinWithUdfAndFilter() throws SamzaSqlValidatorException {
+    testRemoteJoinWithUdfAndFilterHelper(false);
+  }
+
+  @Test
+  public void testRemoteJoinWithFilterAndOptimizer() throws SamzaSqlValidatorException {
+    testRemoteJoinWithFilterHelper(true);
+  }
+
+  @Test
+  public void testRemoteJoinWithUdfAndFilterAndOptimizer() throws SamzaSqlValidatorException {
+    testRemoteJoinWithUdfAndFilterHelper(true);
+  }
+
+  void testRemoteJoinWithFilterHelper(boolean enableOptimizer) throws SamzaSqlValidatorException {
+    Map<String, String> staticConfigs = SamzaSqlTestConfig.fetchStaticConfigsWithFactories(1);
+
+    String sql =
+        "Insert into testavro.enrichedPageViewTopic "
+            + "select pv.pageKey as __key__, pv.pageKey as pageKey, coalesce(null, 'N/A') as companyName,"
+            + "       p.name as profileName, p.address as profileAddress "
+            + "from testavro.PAGEVIEW as pv "
+            + "join testRemoteStore.Profile.`$table` as p "
+            + " on p.__key__ = pv.profileId"
+            + " where p.name = pv.pageKey AND p.name = 'Mike' AND pv.profileId = 1";
+
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMT, sql);
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_ENABLE_PLAN_OPTIMIZER, Boolean.toString(enableOptimizer));
+
+    Config samzaConfig = new MapConfig(staticConfigs);
+    DslConverter dslConverter = new SamzaSqlDslConverterFactory().create(samzaConfig);
+    Collection<RelRoot> relRoots = dslConverter.convertDsl(sql);
+
+    /*
+      Query plan without optimization:
+      LogicalProject(__key__=[$1], pageKey=[$1], companyName=['N/A'], profileName=[$5], profileAddress=[$7])
+        LogicalFilter(condition=[AND(=($5, $1), =($5, 'Mike'), =($2, 1))])
+          LogicalJoin(condition=[=($3, $2)], joinType=[inner])
+            LogicalTableScan(table=[[testavro, PAGEVIEW]])
+            LogicalTableScan(table=[[testRemoteStore, Profile, $table]])
+
+      Query plan with optimization:
+      LogicalProject(__key__=[$1], pageKey=[$1], companyName=['N/A'], profileName=[$5], profileAddress=[$7])
+        LogicalFilter(condition=[AND(=($5, $1), =($5, 'Mike'))])
+          LogicalJoin(condition=[=($3, $2)], joinType=[inner])
+            LogicalFilter(condition=[=($2, 1)])
+              LogicalTableScan(table=[[testavro, PAGEVIEW]])
+            LogicalTableScan(table=[[testRemoteStore, Profile, $table]])
+     */
+
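+    // With the optimizer enabled, the predicate pv.profileId = 1, which references only stream-side
+    // columns, is pushed below the join onto the PAGEVIEW scan, while the predicates that reference
+    // the remote table remain above the join (as shown in the plans above).
+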
+    assertEquals(1, relRoots.size());
+    RelRoot relRoot = relRoots.iterator().next();
+    RelNode relNode = relRoot.rel;
+    assertTrue(relNode instanceof LogicalProject);
+    relNode = relNode.getInput(0);
+    assertTrue(relNode instanceof LogicalFilter);
+    if (enableOptimizer) {
+      assertEquals("AND(=($1, $5), =($5, 'Mike'))", ((LogicalFilter) relNode).getCondition().toString());
+    } else {
+      assertEquals("AND(=(1, $2), =($1, $5), =($5, 'Mike'))", ((LogicalFilter) relNode).getCondition().toString());
+    }
+    relNode = relNode.getInput(0);
+    assertTrue(relNode instanceof LogicalJoin);
+    assertEquals(2, relNode.getInputs().size());
+    LogicalJoin join = (LogicalJoin) relNode;
+    RelNode left = join.getLeft();
+    RelNode right = join.getRight();
+    assertTrue("Was instance of "  + right.getClass(), right instanceof LogicalProject);
+    if (enableOptimizer) {
+      assertTrue(left instanceof LogicalFilter);
+      assertEquals("=(1, $2)", ((LogicalFilter) left).getCondition().toString());
+      assertTrue(left.getInput(0) instanceof LogicalTableScan);
+    } else {
+      assertTrue(left instanceof LogicalTableScan);
+    }
+  }
+
+  void testRemoteJoinWithUdfAndFilterHelper(boolean enableOptimizer) throws SamzaSqlValidatorException {
+    Map<String, String> staticConfigs = SamzaSqlTestConfig.fetchStaticConfigsWithFactories(1);
+
+    String sql =
+        "Insert into testavro.enrichedPageViewTopic "
+            + "select pv.pageKey as __key__, pv.pageKey as pageKey, coalesce(null, 'N/A') as companyName,"
+            + "       p.name as profileName, p.address as profileAddress "
+            + "from testRemoteStore.Profile.`$table` as p "
+            + "join testavro.PAGEVIEW as pv "
+            + " on p.__key__ = BuildOutputRecord('id', pv.profileId)"
+            + " where p.name = 'Mike' and pv.profileId = 1 and p.name = pv.pageKey";
+
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMT, sql);
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_ENABLE_PLAN_OPTIMIZER, Boolean.toString(enableOptimizer));
+
+    Config samzaConfig = new MapConfig(staticConfigs);
+    DslConverter dslConverter = new SamzaSqlDslConverterFactory().create(samzaConfig);
+    Collection<RelRoot> relRoots = dslConverter.convertDsl(sql);
+
+    /*
+      Query plan without optimization:
+      LogicalProject(__key__=[$9], pageKey=[$9], companyName=['N/A'], profileName=[$2], profileAddress=[$4])
+        LogicalFilter(condition=[AND(=($2, 'Mike'), =($10, 1), =($2, $9))])  ==> Only the second condition could be pushed down.
+          LogicalProject(__key__=[$0], id=[$1], name=[$2], companyId=[$3], address=[$4], selfEmployed=[$5],
+                                  phoneNumbers=[$6], mapValues=[$7], __key__0=[$8], pageKey=[$9], profileId=[$10])
+                                  ==> ProjectMergeRule removes this redundant node.
+            LogicalJoin(condition=[=($0, $11)], joinType=[inner])
+              LogicalTableScan(table=[[testRemoteStore, Profile, $table]])
+              LogicalProject(__key__=[$0], pageKey=[$1], profileId=[$2], $f3=[BuildOutputRecord('id', $2)])  ==> Filter is pushed above project.
+                LogicalTableScan(table=[[testavro, PAGEVIEW]])
+
+      Query plan with optimization:
+      LogicalProject(__key__=[$9], pageKey=[$9], companyName=['N/A'], profileName=[$2], profileAddress=[$4])
+          LogicalFilter(condition=[AND(=($2, 'Mike'), =($2, $9))])
+            LogicalJoin(condition=[=($0, $11)], joinType=[inner])
+              LogicalTableScan(table=[[testRemoteStore, Profile, $table]])
+              LogicalFilter(condition=[=($2, 1)])
+                LogicalProject(__key__=[$0], pageKey=[$1], profileId=[$2], $f3=[BuildOutputRecord('id', $2)])
+                  LogicalTableScan(table=[[testavro, PAGEVIEW]])
+     */
+
+    assertEquals(1, relRoots.size());
+    RelRoot relRoot = relRoots.iterator().next();
+    RelNode relNode = relRoot.rel;
+    assertTrue(relNode instanceof LogicalProject);
+    relNode = relNode.getInput(0);
+    assertTrue(relNode instanceof LogicalFilter);
+    if (enableOptimizer) {
+      assertEquals("AND(=($2, $10), =($2, 'Mike'))", ((LogicalFilter) relNode).getCondition().toString());
+    } else {
+      assertEquals("AND(=(1, $11), =($2, $10), =($2, 'Mike'))", ((LogicalFilter) relNode).getCondition().toString());
+    }
+    relNode = relNode.getInput(0);
+    if (enableOptimizer) {
+      assertTrue(relNode instanceof LogicalJoin);
+      assertEquals(2, relNode.getInputs().size());
+    } else {
+      assertTrue(relNode instanceof LogicalProject);
+      relNode = relNode.getInput(0);
+    }
+    LogicalJoin join = (LogicalJoin) relNode;
+    RelNode left = join.getLeft();
+    RelNode right = join.getRight();
+    assertTrue("was instance of " + left.getClass(), left instanceof LogicalProject);
+    if (enableOptimizer) {
+      assertTrue(right instanceof LogicalFilter);
+      assertEquals("=(1, $2)", ((LogicalFilter) right).getCondition().toString());
+      relNode = right.getInput(0);
+    } else {
+      relNode = right;
+    }
+    assertTrue(relNode instanceof LogicalProject);
+    relNode = relNode.getInput(0);
+    assertTrue(relNode instanceof LogicalTableScan);
+  }
+
+  @Test
+  public void testLocalStreamTableInnerJoinFilterOptimization() throws Exception {
+    Map<String, String> staticConfigs = SamzaSqlTestConfig.fetchStaticConfigsWithFactories(1);
+    String sql =
+        "Insert into testavro.enrichedPageViewTopic "
+            + "select pv.pageKey as __key__, pv.pageKey as pageKey, p.name as companyName, p.name as profileName,"
+            + "       p.address as profileAddress "
+            + "from testavro.PROFILE.`$table` as p "
+            + "join testavro.PAGEVIEW as pv "
+            + " on p.id = pv.profileId "
+            + "where p.name = 'Mike'";
+
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMT, sql);
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_ENABLE_PLAN_OPTIMIZER, Boolean.toString(true));
+
+    Config samzaConfig = new MapConfig(staticConfigs);
+    DslConverter dslConverter = new SamzaSqlDslConverterFactory().create(samzaConfig);
+    Collection<RelRoot> relRootsWithOptimization = dslConverter.convertDsl(sql);
+
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_ENABLE_PLAN_OPTIMIZER, Boolean.toString(false));
+
+    samzaConfig = new MapConfig(staticConfigs);
+    dslConverter = new SamzaSqlDslConverterFactory().create(samzaConfig);
+    Collection<RelRoot> relRootsWithoutOptimization = dslConverter.convertDsl(sql);
+
+    // We do not yet have any join filter optimizations for local joins. Hence the plans with and without optimization
+    // should be the same.
+    assertEquals(RelOptUtil.toString(relRootsWithOptimization.iterator().next().rel, SqlExplainLevel.EXPPLAN_ATTRIBUTES),
+        RelOptUtil.toString(relRootsWithoutOptimization.iterator().next().rel, SqlExplainLevel.EXPPLAN_ATTRIBUTES));
+  }
+
+  @Test
+  public void testRemoteJoinFilterPushDownWithUdfInFilterAndOptimizer() throws SamzaSqlValidatorException {
+    Map<String, String> staticConfigs = SamzaSqlTestConfig.fetchStaticConfigsWithFactories(1);
+
+    String sql =
+        "Insert into testavro.enrichedPageViewTopic "
+            + "select pv.pageKey as __key__, pv.pageKey as pageKey, coalesce(null, 'N/A') as companyName,"
+            + "       p.name as profileName, p.address as profileAddress "
+            + "from testRemoteStore.Profile.`$table` as p "
+            + "join testavro.PAGEVIEW as pv "
+            + " on p.__key__ = pv.profileId"
+            + " where p.name = pv.pageKey AND p.name = 'Mike' AND pv.profileId = MyTest(pv.profileId)";
+
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMT, sql);
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_ENABLE_PLAN_OPTIMIZER, Boolean.toString(true));
+
+    Config samzaConfig = new MapConfig(staticConfigs);
+    DslConverter dslConverter = new SamzaSqlDslConverterFactory().create(samzaConfig);
+    Collection<RelRoot> relRoots = dslConverter.convertDsl(sql);
+
+    /*
+      Query plan without optimization:
+      LogicalProject(__key__=[$9], pageKey=[$9], companyName=['N/A'], profileName=[$2], profileAddress=[$4])
+        LogicalFilter(condition=[AND(=($2, $9), =($2, 'Mike'), =($10, CAST(MyTest($10)):INTEGER))])
+          LogicalJoin(condition=[=($0, $10)], joinType=[inner])
+            LogicalTableScan(table=[[testRemoteStore, Profile, $table]])
+            LogicalTableScan(table=[[testavro, PAGEVIEW]])
+
+      Query plan with optimization:
+      LogicalProject(__key__=[$9], pageKey=[$9], companyName=['N/A'], profileName=[$2], profileAddress=[$4])
+        LogicalFilter(condition=[AND(=($2, $9), =($2, 'Mike'))])
+          LogicalJoin(condition=[=($0, $10)], joinType=[inner])
+            LogicalTableScan(table=[[testRemoteStore, Profile, $table]])
+            LogicalFilter(condition=[=($2, CAST(MyTest($2)):INTEGER)])
+              LogicalTableScan(table=[[testavro, PAGEVIEW]])
+     */
+
+    assertEquals(1, relRoots.size());
+    RelRoot relRoot = relRoots.iterator().next();
+    RelNode relNode = relRoot.rel;
+    assertTrue(relNode instanceof LogicalProject);
+    relNode = relNode.getInput(0);
+    assertTrue(relNode instanceof LogicalFilter);
+    assertEquals("AND(=($2, $10), =($2, 'Mike'))", ((LogicalFilter) relNode).getCondition().toString());
+    relNode = relNode.getInput(0);
+    assertTrue(relNode instanceof LogicalJoin);
+    assertEquals(2, relNode.getInputs().size());
+    LogicalJoin join = (LogicalJoin) relNode;
+    RelNode left = join.getLeft();
+    RelNode right = join.getRight();
+    assertTrue(left instanceof LogicalProject);
+    assertTrue(right instanceof LogicalFilter);
+    assertEquals("=($2, CAST(MyTest($2)):INTEGER)", ((LogicalFilter) right).getCondition().toString());
+    assertTrue(right.getInput(0) instanceof LogicalTableScan);
+  }
+
+  @Test
+  public void testRemoteJoinNoFilterPushDownWithUdfInFilterAndOptimizer() throws SamzaSqlValidatorException {
+    Map<String, String> staticConfigs = SamzaSqlTestConfig.fetchStaticConfigsWithFactories(1);
+
+    String sql =
+        "Insert into testavro.enrichedPageViewTopic "
+            + "select pv.pageKey as __key__, pv.pageKey as pageKey, coalesce(null, 'N/A') as companyName,"
+            + "       p.name as profileName, p.address as profileAddress "
+            + "from testRemoteStore.Profile.`$table` as p "
+            + "join testavro.PAGEVIEW as pv "
+            + " on p.__key__ = pv.profileId"
+            + " where p.name = pv.pageKey AND p.name = 'Mike' AND pv.profileId = MyTestPoly(p.name)";
+
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMT, sql);
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_ENABLE_PLAN_OPTIMIZER, Boolean.toString(true));
+
+    Config samzaConfig = new MapConfig(staticConfigs);
+    DslConverter dslConverter = new SamzaSqlDslConverterFactory().create(samzaConfig);
+    Collection<RelRoot> relRootsWithOptimization = dslConverter.convertDsl(sql);
+
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_ENABLE_PLAN_OPTIMIZER, Boolean.toString(false));
+
+    samzaConfig = new MapConfig(staticConfigs);
+    dslConverter = new SamzaSqlDslConverterFactory().create(samzaConfig);
+    Collection<RelRoot> relRootsWithoutOptimization = dslConverter.convertDsl(sql);
+
+    /*
+      LogicalProject(__key__=[$9], pageKey=[$9], companyName=['N/A'], profileName=[$2], profileAddress=[$4])
+        LogicalFilter(condition=[AND(=($2, $9), =($2, 'Mike'), =($10, CAST(MyTestPoly($10)):INTEGER))])
+          LogicalJoin(condition=[=($0, $10)], joinType=[inner])
+            LogicalTableScan(table=[[testRemoteStore, Profile, $table]])
+            LogicalTableScan(table=[[testavro, PAGEVIEW]])
+     */
+
+    // None of the conditions in the filter could be pushed down as they all require a remote call. Hence the plans
+    // with and without optimization should be the same.
+    assertEquals(RelOptUtil.toString(relRootsWithOptimization.iterator().next().rel, SqlExplainLevel.EXPPLAN_ATTRIBUTES),
+        RelOptUtil.toString(relRootsWithoutOptimization.iterator().next().rel, SqlExplainLevel.EXPPLAN_ATTRIBUTES));
+  }
+}
+
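Note: the two assertions above compare query plans by rendering each Calcite RelNode to text with RelOptUtil.toString. A minimal helper capturing that comparison pattern (PlanAssert and assertSamePlan are illustrative names, not part of this patch):

    import org.apache.calcite.plan.RelOptUtil;
    import org.apache.calcite.rel.RelNode;
    import org.apache.calcite.sql.SqlExplainLevel;
    import org.junit.Assert;

    final class PlanAssert {
      /** Asserts that two Calcite plans render to the same textual form. */
      static void assertSamePlan(RelNode planWithOptimization, RelNode planWithoutOptimization) {
        // EXPPLAN_ATTRIBUTES prints each operator with its attributes (conditions, projections),
        // so any change introduced by the optimizer shows up in the string comparison.
        String expected = RelOptUtil.toString(planWithOptimization, SqlExplainLevel.EXPPLAN_ATTRIBUTES);
        String actual = RelOptUtil.toString(planWithoutOptimization, SqlExplainLevel.EXPPLAN_ATTRIBUTES);
        Assert.assertEquals(expected, actual);
      }
    }
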
diff --git a/samza-sql/src/test/java/org/apache/samza/sql/runner/TestSamzaSqlApplicationConfig.java b/samza-sql/src/test/java/org/apache/samza/sql/runner/TestSamzaSqlApplicationConfig.java
index 725da90..3e31e70 100644
--- a/samza-sql/src/test/java/org/apache/samza/sql/runner/TestSamzaSqlApplicationConfig.java
+++ b/samza-sql/src/test/java/org/apache/samza/sql/runner/TestSamzaSqlApplicationConfig.java
@@ -168,7 +168,7 @@
               .collect(Collectors.toList()),
           queryInfo.stream().map(SamzaSqlQueryParser.QueryInfo::getSink).collect(Collectors.toList()));
       Assert.fail();
-    } catch (IllegalArgumentException e) {
+    } catch (NullPointerException e) {
       // swallow
     }
   }
diff --git a/samza-sql/src/test/java/org/apache/samza/sql/system/ConsoleLoggingSystemFactory.java b/samza-sql/src/test/java/org/apache/samza/sql/system/ConsoleLoggingSystemFactory.java
index aa5ac1b..9d9ac0c 100644
--- a/samza-sql/src/test/java/org/apache/samza/sql/system/ConsoleLoggingSystemFactory.java
+++ b/samza-sql/src/test/java/org/apache/samza/sql/system/ConsoleLoggingSystemFactory.java
@@ -19,7 +19,7 @@
 
 package org.apache.samza.sql.system;
 
-import org.apache.commons.lang.NotImplementedException;
+import org.apache.commons.lang3.NotImplementedException;
 import org.apache.samza.config.Config;
 import org.apache.samza.metrics.MetricsRegistry;
 import org.apache.samza.system.OutgoingMessageEnvelope;
@@ -41,7 +41,7 @@
 
   @Override
   public SystemConsumer getConsumer(String systemName, Config config, MetricsRegistry registry) {
-    throw new NotImplementedException();
+    throw new NotImplementedException("Not Implemented");
   }
 
   @Override
diff --git a/samza-sql/src/test/java/org/apache/samza/sql/system/TestAvroSystemFactory.java b/samza-sql/src/test/java/org/apache/samza/sql/system/TestAvroSystemFactory.java
index 65e0ad0..cc33e5f 100644
--- a/samza-sql/src/test/java/org/apache/samza/sql/system/TestAvroSystemFactory.java
+++ b/samza-sql/src/test/java/org/apache/samza/sql/system/TestAvroSystemFactory.java
@@ -63,15 +63,15 @@
   public static final String CFG_INCLUDE_NULL_SIMPLE_RECORDS = "includeNullSimpleRecords";
   public static final String CFG_SLEEP_BETWEEN_POLLS_MS = "sleepBetweenPollsMs";
 
-  private static final String[] profileNames = {"John", "Mike", "Mary", "Joe", "Brad", "Jennifer"};
-  private static final int[] profileZips = {94000, 94001, 94002, 94003, 94004, 94005};
-  private static final int[] streetNums = {1234, 1235, 1236, 1237, 1238, 1239};
-  private static final String[] phoneNumbers = {"000-000-0000", "111-111-1111", "222-222-2222", "333-333-3333",
+  private static final String[] PROFILE_NAMES = {"John", "Mike", "Mary", "Joe", "Brad", "Jennifer"};
+  private static final int[] PROFILE_ZIPS = {94000, 94001, 94002, 94003, 94004, 94005};
+  private static final int[] STREET_NUMS = {1234, 1235, 1236, 1237, 1238, 1239};
+  private static final String[] PHONE_NUMBERS = {"000-000-0000", "111-111-1111", "222-222-2222", "333-333-3333",
       "444-444-4444", "555-555-5555"};
-  public static final String[] companies = {"MSFT", "LKND", "GOOG", "FB", "AMZN", "CSCO"};
-  public static final String[] pageKeys = {"inbox", "home", "search", "pymk", "group", "job"};
+  public static final String[] COMPANIES = {"MSFT", "LKND", "GOOG", "FB", "AMZN", "CSCO"};
+  public static final String[] PAGE_KEYS = {"inbox", "home", "search", "pymk", "group", "job"};
   public static final byte[] DEFAULT_TRACKING_ID_BYTES =
-      {76, 75, -24, 10, 33, -117, 24, -52, -110, -39, -5, 102, 65, 57, -62, -1};
+    {76, 75, -24, 10, 33, -117, 24, -52, -110, -39, -5, 102, 65, 57, -62, -1};
   public static final int NULL_RECORD_FREQUENCY = 5;
 
 
@@ -79,46 +79,46 @@
 
   public static List<String> getPageKeyProfileNameJoin(int numMessages) {
     return IntStream.range(0, numMessages)
-                .mapToObj(i -> pageKeys[i % pageKeys.length] + "," + profileNames[i % profileNames.length])
+                .mapToObj(i -> PAGE_KEYS[i % PAGE_KEYS.length] + "," + PROFILE_NAMES[i % PROFILE_NAMES.length])
                 .collect(Collectors.toList());
   }
 
   public static List<String> getPageKeyProfileNameAddressJoin(int numMessages) {
     return IntStream.range(0, numMessages)
-        .mapToObj(i -> pageKeys[i % pageKeys.length] + "," + profileNames[i % profileNames.length] + "," +
-            profileZips[i % profileZips.length] + "," + streetNums[i % streetNums.length])
+        .mapToObj(i -> PAGE_KEYS[i % PAGE_KEYS.length] + "," + PROFILE_NAMES[i % PROFILE_NAMES.length] + "," +
+            PROFILE_ZIPS[i % PROFILE_ZIPS.length] + "," + STREET_NUMS[i % STREET_NUMS.length])
         .collect(Collectors.toList());
   }
 
   public static List<String> getPageKeyProfileNameJoinWithNullForeignKeys(int numMessages) {
     // All even profileId foreign keys are null
     return IntStream.range(0, numMessages / 2)
-        .mapToObj(i -> pageKeys[(i * 2 + 1) % pageKeys.length] + "," + profileNames[(i * 2 + 1) % profileNames.length])
+        .mapToObj(i -> PAGE_KEYS[(i * 2 + 1) % PAGE_KEYS.length] + "," + PROFILE_NAMES[(i * 2 + 1) % PROFILE_NAMES.length])
         .collect(Collectors.toList());
   }
 
   public static List<String> getPageKeyProfileNameOuterJoinWithNullForeignKeys(int numMessages) {
     // All even profileId foreign keys are null
     return IntStream.range(0, numMessages)
-        .mapToObj(i -> pageKeys[i % pageKeys.length] + "," + ((i % 2 == 0) ? "null" : profileNames[i % profileNames.length]))
+        .mapToObj(i -> PAGE_KEYS[i % PAGE_KEYS.length] + "," + ((i % 2 == 0) ? "null" : PROFILE_NAMES[i % PROFILE_NAMES.length]))
         .collect(Collectors.toList());
   }
 
   public static List<String> getPageKeyProfileCompanyNameJoin(int numMessages) {
     return IntStream.range(0, numMessages)
-        .mapToObj(i -> pageKeys[i % pageKeys.length] + "," + profileNames[i % profileNames.length] +
-            "," + companies[i % companies.length])
+        .mapToObj(i -> PAGE_KEYS[i % PAGE_KEYS.length] + "," + PROFILE_NAMES[i % PROFILE_NAMES.length] +
+            "," + COMPANIES[i % COMPANIES.length])
         .collect(Collectors.toList());
   }
 
   public static HashMap<String, Integer> getPageKeyGroupByResult(int numMessages, Set<String> includePageKeys) {
     HashMap<String, Integer> pageKeyCountMap = new HashMap<>();
-    int quotient = numMessages / pageKeys.length;
-    int remainder = numMessages % pageKeys.length;
-    IntStream.range(0, pageKeys.length)
+    int quotient = numMessages / PAGE_KEYS.length;
+    int remainder = numMessages % PAGE_KEYS.length;
+    IntStream.range(0, PAGE_KEYS.length)
         .map(k -> {
-          if (includePageKeys.contains(pageKeys[k])) {
-            pageKeyCountMap.put(pageKeys[k], quotient + ((k < remainder) ? 1 : 0));
+          if (includePageKeys.contains(PAGE_KEYS[k])) {
+            pageKeyCountMap.put(PAGE_KEYS[k], quotient + ((k < remainder) ? 1 : 0));
           }
           return k;
         });
@@ -205,12 +205,12 @@
         int curMessages = curMessagesPerSsp.get(ssp);
         // We send num Messages and an end of stream message following that.
         List<IncomingMessageEnvelope> envelopes =
-            IntStream.range(curMessages, curMessages + numMessages/4)
+            IntStream.range(curMessages, curMessages + numMessages / 4)
                 .mapToObj(i -> i < numMessages ? new IncomingMessageEnvelope(ssp, null, getKey(i, ssp),
                     getData(i, ssp)) : IncomingMessageEnvelope.buildEndOfStreamEnvelope(ssp))
                 .collect(Collectors.toList());
         envelopeMap.put(ssp, envelopes);
-        curMessagesPerSsp.put(ssp, curMessages + numMessages/4);
+        curMessagesPerSsp.put(ssp, curMessages + numMessages / 4);
       });
       if (sleepBetweenPollsMs > 0) {
         Thread.sleep(sleepBetweenPollsMs);
@@ -254,23 +254,26 @@
     private Object createProfileRecord(int index) {
       GenericRecord record = new GenericData.Record(Profile.SCHEMA$);
       record.put("id", index);
-      record.put("name", profileNames[index % profileNames.length]);
+      record.put("name", PROFILE_NAMES[index % PROFILE_NAMES.length]);
       record.put("address", createProfileAddressRecord(index));
-      record.put("companyId", includeNullForeignKeys && (index % 2 == 0) ? null : index % companies.length);
-      record.put("phoneNumbers", createProfilePhoneNumbers(index % phoneNumbers.length));
+      record.put("companyId", includeNullForeignKeys && (index % 2 == 0) ? null : index % COMPANIES.length);
+      record.put("phoneNumbers", createProfilePhoneNumbers(index % PHONE_NUMBERS.length));
+      Map<String, Object> mapValues = new HashMap<>();
+      mapValues.put("key", createSimpleRecord(index, false));
+      record.put("mapValues", mapValues);
       return record;
     }
 
     private Object createProfileAddressRecord(int index) {
       GenericRecord record = new GenericData.Record(AddressRecord.SCHEMA$);
       record.put("streetnum", createProfileStreetNumRecord(index));
-      record.put("zip", profileZips[index % profileNames.length]);
+      record.put("zip", PROFILE_ZIPS[index % PROFILE_NAMES.length]);
       return record;
     }
 
     private Object createProfileStreetNumRecord(int index) {
       GenericRecord record = new GenericData.Record(StreetNumRecord.SCHEMA$);
-      record.put("number", streetNums[index % streetNums.length]);
+      record.put("number", STREET_NUMS[index % STREET_NUMS.length]);
       return record;
     }
 
@@ -284,7 +287,7 @@
 
     private Object createPhoneNumberRecord(int index, Kind kind) {
       GenericRecord record = new GenericData.Record(PhoneNumber.SCHEMA$);
-      StringBuilder number = new StringBuilder(phoneNumbers[index]);
+      StringBuilder number = new StringBuilder(PHONE_NUMBERS[index]);
       int lastCharIdx = number.length() - 1;
       String suffix = "";
       switch (kind) {
@@ -307,7 +310,7 @@
     private Object createCompanyRecord(int index) {
       GenericRecord record = new GenericData.Record(Company.SCHEMA$);
       record.put("id", index);
-      record.put("name", companies[index % companies.length]);
+      record.put("name", COMPANIES[index % COMPANIES.length]);
       return record;
     }
 
@@ -315,7 +318,7 @@
       GenericRecord record = new GenericData.Record(PageView.SCHEMA$);
       // All even profileId foreign keys are null
       record.put("profileId", includeNullForeignKeys && (index % 2 == 0) ? null : index);
-      record.put("pageKey", pageKeys[index % pageKeys.length]);
+      record.put("pageKey", PAGE_KEYS[index % PAGE_KEYS.length]);
       return record;
     }
 
@@ -324,7 +327,7 @@
       GenericRecord record = new GenericData.Record(ComplexRecord.SCHEMA$);
       record.put("id", index);
       record.put("string_value", "Name" + index);
-      record.put("bytes_value", ByteBuffer.wrap(("sample bytes").getBytes()));
+      record.put("bytes_value", ByteBuffer.wrap("sample bytes".getBytes()));
       record.put("float_value0", index + 0.123456f);
       record.put("double_value", index + 0.0123456789);
       MyFixed myFixedVar = new MyFixed();
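Note: the consumer hunk above sends numMessages / 4 envelopes per poll and switches to end-of-stream envelopes once the data is exhausted. A minimal sketch of that batching step, assuming fixed key and payload values for brevity (the real factory derives them from the message index):

    import java.util.List;
    import java.util.stream.Collectors;
    import java.util.stream.IntStream;
    import org.apache.samza.system.IncomingMessageEnvelope;
    import org.apache.samza.system.SystemStreamPartition;

    final class EndOfStreamBatchSketch {
      /** Emits the next quarter of the messages for one partition, then end-of-stream envelopes. */
      static List<IncomingMessageEnvelope> nextBatch(SystemStreamPartition ssp, int alreadySent,
          int totalMessages, Object key, Object data) {
        return IntStream.range(alreadySent, alreadySent + totalMessages / 4)
            .mapToObj(i -> i < totalMessages
                ? new IncomingMessageEnvelope(ssp, null, key, data)        // regular message, null offset
                : IncomingMessageEnvelope.buildEndOfStreamEnvelope(ssp))   // signals the stream is done
            .collect(Collectors.toList());
      }
    }
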
diff --git a/samza-sql/src/test/java/org/apache/samza/sql/translator/TestFilterTranslator.java b/samza-sql/src/test/java/org/apache/samza/sql/translator/TestFilterTranslator.java
index 2e2b454..396192d 100644
--- a/samza-sql/src/test/java/org/apache/samza/sql/translator/TestFilterTranslator.java
+++ b/samza-sql/src/test/java/org/apache/samza/sql/translator/TestFilterTranslator.java
@@ -66,8 +66,7 @@
 @RunWith(PowerMockRunner.class)
 @PrepareForTest(LogicalFilter.class)
 public class TestFilterTranslator extends TranslatorTestBase {
-  final private String LOGICAL_OP_ID = "sql0_filter_0";
-
+  private static final String LOGICAL_OP_ID = "sql0_filter_0";
 
   @Test
   public void testTranslate() throws IOException, ClassNotFoundException {
@@ -106,7 +105,7 @@
     assertEquals(filterSpec.getOpCode(), OperatorSpec.OpCode.FILTER);
 
     // Verify that the describe() method will establish the context for the filter function
-    Map<Integer, TranslatorContext> mockContexts= new HashMap<>();
+    Map<Integer, TranslatorContext> mockContexts = new HashMap<>();
     mockContexts.put(1, mockTranslatorContext);
     when(mockContext.getApplicationTaskContext()).thenReturn(new SamzaSqlApplicationContext(mockContexts));
     filterSpec.getTransformFn().init(mockContext);
@@ -132,7 +131,7 @@
     when(mockTranslatorContext.getDataContext()).thenReturn(dataContext);
     Object[] result = new Object[1];
 
-    doAnswer( invocation -> {
+    doAnswer(invocation -> {
       Object[] retValue = invocation.getArgumentAt(4, Object[].class);
       retValue[0] = new Boolean(true);
       return null;
@@ -140,7 +139,7 @@
         eq(mockInputMsg.getSamzaSqlRelRecord().getFieldValues().toArray()), eq(result));
     assertTrue(filterFn.apply(mockInputMsg));
 
-    doAnswer( invocation -> {
+    doAnswer(invocation -> {
       Object[] retValue = invocation.getArgumentAt(4, Object[].class);
       retValue[0] = new Boolean(false);
       return null;
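Note: the filter test above stubs the compiled expression with doAnswer because the expression reports its result through the Object[] passed as the last argument rather than through a return value. A minimal sketch of that out-parameter stubbing pattern, using the same Mockito 1.x getArgumentAt API the test uses (MyEvaluator and compute are hypothetical stand-ins):

    import static org.mockito.Matchers.any;
    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.mock;

    final class OutParamStubSketch {
      /** Hypothetical collaborator that writes its result into a caller-supplied array. */
      interface MyEvaluator {
        void compute(Object[] inputs, Object[] result);
      }

      static MyEvaluator evaluatorReturning(Object value) {
        MyEvaluator evaluator = mock(MyEvaluator.class);
        doAnswer(invocation -> {
          // The result array is the second argument (index 1); write the canned value into it.
          Object[] result = invocation.getArgumentAt(1, Object[].class);
          result[0] = value;
          return null; // compute() is void, so the Answer returns null
        }).when(evaluator).compute(any(Object[].class), any(Object[].class));
        return evaluator;
      }
    }
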
diff --git a/samza-sql/src/test/java/org/apache/samza/sql/translator/TestJoinTranslator.java b/samza-sql/src/test/java/org/apache/samza/sql/translator/TestJoinTranslator.java
index e8f2d1f..8f0d969 100644
--- a/samza-sql/src/test/java/org/apache/samza/sql/translator/TestJoinTranslator.java
+++ b/samza-sql/src/test/java/org/apache/samza/sql/translator/TestJoinTranslator.java
@@ -20,15 +20,17 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
-import org.apache.calcite.adapter.enumerable.EnumerableTableScan;
 import org.apache.calcite.plan.RelOptTable;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.JoinRelType;
 import org.apache.calcite.rel.logical.LogicalJoin;
+import org.apache.calcite.rel.logical.LogicalTableScan;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rex.RexCall;
 import org.apache.calcite.rex.RexInputRef;
@@ -57,6 +59,7 @@
 import org.apache.samza.sql.data.SamzaSqlRelMessage;
 import org.apache.samza.sql.interfaces.SqlIOConfig;
 import org.apache.samza.storage.kv.descriptors.RocksDbTableDescriptor;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mockito.internal.util.reflection.Whitebox;
@@ -78,8 +81,11 @@
 /**
  * Tests for {@link JoinTranslator}
  */
+@Ignore("Very challenging to keep mocking the Calcite plan and TestSamzaSqlRemoteTable covers most of it.")
+// TODO if we feel like we need this Test then let's try to use Calcite to build an actual join and condition nodes
+//  it is way more clean and easy than mocking the class
 @RunWith(PowerMockRunner.class)
-@PrepareForTest({LogicalJoin.class, EnumerableTableScan.class})
+@PrepareForTest({LogicalJoin.class, LogicalTableScan.class})
 public class TestJoinTranslator extends TranslatorTestBase {
 
   @Test
@@ -98,17 +104,14 @@
     final int queryId = 0;
     LogicalJoin mockJoin = PowerMockito.mock(LogicalJoin.class);
     TranslatorContext mockTranslatorContext = mock(TranslatorContext.class);
-    RelNode mockLeftInput = PowerMockito.mock(EnumerableTableScan.class);
+    RelNode mockLeftInput = PowerMockito.mock(LogicalTableScan.class);
     RelNode mockRightInput = mock(RelNode.class);
     List<RelNode> inputs = new ArrayList<>();
     inputs.add(mockLeftInput);
     inputs.add(mockRightInput);
     RelOptTable mockLeftTable = mock(RelOptTable.class);
     when(mockLeftInput.getTable()).thenReturn(mockLeftTable);
-    List<String> qualifiedTableName = new ArrayList<String>() {{
-      this.add("test");
-      this.add("LeftTable");
-    }};
+    List<String> qualifiedTableName = Arrays.asList("test", "LeftTable");
     when(mockLeftTable.getQualifiedName()).thenReturn(qualifiedTableName);
     when(mockLeftInput.getId()).thenReturn(1);
     when(mockRightInput.getId()).thenReturn(2);
@@ -138,13 +141,8 @@
     when(mockLeftRowType.getFieldCount()).thenReturn(0); //?? why ??
 
     when(mockLeftInput.getRowType()).thenReturn(mockLeftRowType);
-    List<String> leftFieldNames = new ArrayList<String>() {{
-      this.add("test_table_field1");
-    }};
-    List<String> rightStreamFieldNames = new ArrayList<String>() {
-      {
-        this.add("test_stream_field1");
-      } };
+    List<String> leftFieldNames = Collections.singletonList("test_table_field1");
+    List<String> rightStreamFieldNames = Collections.singletonList("test_stream_field1");
     when(mockLeftRowType.getFieldNames()).thenReturn(leftFieldNames);
     RelDataType mockRightRowType = mock(RelDataType.class);
     when(mockRightInput.getRowType()).thenReturn(mockRightRowType);
@@ -192,7 +190,7 @@
     SqlIOConfig mockIOConfig = mock(SqlIOConfig.class);
     TableDescriptor mockTableDesc;
     if (isRemoteTable) {
-     mockTableDesc = mock(RemoteTableDescriptor.class);
+      mockTableDesc = mock(RemoteTableDescriptor.class);
     } else {
       mockTableDesc = mock(RocksDbTableDescriptor.class);
     }
@@ -240,7 +238,7 @@
       assertTrue(joinFn instanceof SamzaSqlLocalTableJoinFunction);
     }
     assertTrue(Whitebox.getInternalState(joinFn, "isTablePosOnRight").equals(false));
-    assertEquals(new ArrayList<Integer>() {{ this.add(0); }}, Whitebox.getInternalState(joinFn, "streamFieldIds"));
+    assertEquals(Collections.singletonList(0), Whitebox.getInternalState(joinFn, "streamFieldIds"));
     assertEquals(leftFieldNames, Whitebox.getInternalState(joinFn, "tableFieldNames"));
     List<String> outputFieldNames = new ArrayList<>();
     outputFieldNames.addAll(leftFieldNames);
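Note: the TODO above suggests that building real Calcite nodes would be cleaner than mocking LogicalJoin and LogicalTableScan. A minimal sketch of that approach with Calcite's RelBuilder; the class name and literal rows are illustrative only, and a real test would scan the Samza SQL schema instead of using values():

    import org.apache.calcite.plan.RelOptUtil;
    import org.apache.calcite.rel.RelNode;
    import org.apache.calcite.rel.core.JoinRelType;
    import org.apache.calcite.sql.fun.SqlStdOperatorTable;
    import org.apache.calcite.tools.FrameworkConfig;
    import org.apache.calcite.tools.Frameworks;
    import org.apache.calcite.tools.RelBuilder;

    final class RelBuilderJoinSketch {
      /** Builds a small inner join over two literal relations, with no mocking involved. */
      static RelNode buildJoin() {
        FrameworkConfig config = Frameworks.newConfigBuilder()
            .defaultSchema(Frameworks.createRootSchema(true))
            .build();
        RelBuilder builder = RelBuilder.create(config);
        return builder
            .values(new String[] {"profileId", "pageKey"}, 1, "inbox", 2, "home")   // stream side
            .values(new String[] {"id", "name"}, 1, "Mike", 2, "John")              // table side
            .join(JoinRelType.INNER,
                builder.call(SqlStdOperatorTable.EQUALS,
                    builder.field(2, 0, "profileId"),
                    builder.field(2, 1, "id")))
            .build();
      }

      public static void main(String[] args) {
        // Prints the plan in the same textual form the optimizer tests compare.
        System.out.println(RelOptUtil.toString(buildJoin()));
      }
    }
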
diff --git a/samza-sql/src/test/java/org/apache/samza/sql/translator/TestProjectTranslator.java b/samza-sql/src/test/java/org/apache/samza/sql/translator/TestProjectTranslator.java
index 40b8671..33e775b 100644
--- a/samza-sql/src/test/java/org/apache/samza/sql/translator/TestProjectTranslator.java
+++ b/samza-sql/src/test/java/org/apache/samza/sql/translator/TestProjectTranslator.java
@@ -18,8 +18,10 @@
 */
 package org.apache.samza.sql.translator;
 
+import com.google.common.collect.ImmutableList;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -27,7 +29,10 @@
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.logical.LogicalProject;
 import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeField;
+import org.apache.calcite.rel.type.RelDataTypeFieldImpl;
 import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.calcite.util.Pair;
 import org.apache.samza.application.descriptors.StreamApplicationDescriptorImpl;
 import org.apache.samza.context.ContainerContext;
@@ -66,7 +71,8 @@
 @RunWith(PowerMockRunner.class)
 @PrepareForTest(LogicalProject.class)
 public class TestProjectTranslator extends TranslatorTestBase {
-  final private String LOGICAL_OP_ID = "sql0_project_0";
+  private static final String LOGICAL_OP_ID = "sql0_project_0";
+  private static final String TEST_FIELD = "test_field";
 
   @Test
   public void testTranslate() throws IOException, ClassNotFoundException {
@@ -85,12 +91,23 @@
     when(mockProject.getInputs()).thenReturn(inputs);
     when(mockProject.getInput()).thenReturn(mockInput);
     RelDataType mockRowType = mock(RelDataType.class);
+    List<RelDataTypeField> relFields = new ArrayList<>();
+    String fieldName = TEST_FIELD;
+    int fieldPos = 0;
+    RelDataType dataType = mock(RelDataType.class);
+    when(dataType.getSqlTypeName()).thenReturn(SqlTypeName.ANY);
+    relFields.add(new RelDataTypeFieldImpl(fieldName, fieldPos, dataType));
     when(mockRowType.getFieldCount()).thenReturn(1);
     when(mockProject.getRowType()).thenReturn(mockRowType);
+    when(mockProject.getRowType().getSqlTypeName()).thenReturn(SqlTypeName.ROW);
+    when(mockProject.getRowType().getFieldList()).thenReturn(relFields);
+    when(mockProject.getRowType().isStruct()).thenReturn(true);
     RexNode mockRexField = mock(RexNode.class);
     List<Pair<RexNode, String>> namedProjects = new ArrayList<>();
-    namedProjects.add(Pair.of(mockRexField, "test_field"));
+    namedProjects.add(Pair.of(mockRexField, TEST_FIELD));
     when(mockProject.getNamedProjects()).thenReturn(namedProjects);
+    when(mockProject.getRowType()).thenReturn(mockRowType);
+    when(mockProject.getRowType().getFieldNames()).thenReturn(ImmutableList.of(TEST_FIELD));
     StreamApplicationDescriptorImpl mockAppDesc = mock(StreamApplicationDescriptorImpl.class);
     OperatorSpec<Object, SamzaSqlRelMessage> mockInputOp = mock(OperatorSpec.class);
     MessageStream<SamzaSqlRelMessage> mockStream = new MessageStreamImpl<>(mockAppDesc, mockInputOp);
@@ -116,7 +133,7 @@
     assertEquals(projectSpec.getOpCode(), OperatorSpec.OpCode.MAP);
 
     // Verify that the bootstrap() method will establish the context for the map function
-    Map<Integer, TranslatorContext> mockContexts= new HashMap<>();
+    Map<Integer, TranslatorContext> mockContexts = new HashMap<>();
     mockContexts.put(1, mockTranslatorContext);
     when(mockContext.getApplicationTaskContext()).thenReturn(new SamzaSqlApplicationContext(mockContexts));
     projectSpec.getTransformFn().init(mockContext);
@@ -144,20 +161,15 @@
     Object[] result = new Object[1];
     final Object mockFieldObj = new Object();
 
-    doAnswer( invocation -> {
+    doAnswer(invocation -> {
       Object[] retValue = invocation.getArgumentAt(4, Object[].class);
       retValue[0] = mockFieldObj;
       return null;
     }).when(mockExpr).execute(eq(executionContext), eq(mockContext), eq(dataContext),
         eq(mockInputMsg.getSamzaSqlRelRecord().getFieldValues().toArray()), eq(result));
     SamzaSqlRelMessage retMsg = (SamzaSqlRelMessage) mapFn.apply(mockInputMsg);
-    assertEquals(retMsg.getSamzaSqlRelRecord().getFieldNames(),
-        new ArrayList<String>() {{
-          this.add("test_field");
-        }});
-    assertEquals(retMsg.getSamzaSqlRelRecord().getFieldValues(), new ArrayList<Object>() {{
-          this.add(mockFieldObj);
-        }});
+    assertEquals(retMsg.getSamzaSqlRelRecord().getFieldNames(), Collections.singletonList(TEST_FIELD));
+    assertEquals(retMsg.getSamzaSqlRelRecord().getFieldValues(), Collections.singletonList(mockFieldObj));
 
     // Verify mapFn.apply() updates the TestMetricsRegistryImpl metrics
     assertEquals(1, testMetricsRegistryImpl.getCounters().get(LOGICAL_OP_ID).get(0).getCount());
diff --git a/samza-sql/src/test/java/org/apache/samza/sql/translator/TestQueryTranslator.java b/samza-sql/src/test/java/org/apache/samza/sql/translator/TestQueryTranslator.java
index efe3896..931036e 100644
--- a/samza-sql/src/test/java/org/apache/samza/sql/translator/TestQueryTranslator.java
+++ b/samza-sql/src/test/java/org/apache/samza/sql/translator/TestQueryTranslator.java
@@ -78,7 +78,7 @@
             .collect(Collectors.toList()),
         queryInfo.stream().map(SamzaSqlQueryParser.QueryInfo::getSink).collect(Collectors.toList()));
 
-    StreamApplicationDescriptorImpl appDesc = new StreamApplicationDescriptorImpl(streamApp -> { },samzaConfig);
+    StreamApplicationDescriptorImpl appDesc = new StreamApplicationDescriptorImpl(streamApp -> { }, samzaConfig);
     QueryTranslator translator = new QueryTranslator(appDesc, samzaSqlApplicationConfig);
     translator.translate(queryInfo.get(0), appDesc, 0);
     OperatorSpecGraph specGraph = appDesc.getOperatorSpecGraph();
@@ -115,7 +115,7 @@
             .collect(Collectors.toList()),
         queryInfo.stream().map(SamzaSqlQueryParser.QueryInfo::getSink).collect(Collectors.toList()));
 
-    StreamApplicationDescriptorImpl appDesc = new StreamApplicationDescriptorImpl(streamApp -> { },samzaConfig);
+    StreamApplicationDescriptorImpl appDesc = new StreamApplicationDescriptorImpl(streamApp -> { }, samzaConfig);
     QueryTranslator translator = new QueryTranslator(appDesc, samzaSqlApplicationConfig);
 
     translator.translate(queryInfo.get(0), appDesc, 0);
@@ -160,7 +160,7 @@
             .collect(Collectors.toList()),
         queryInfo.stream().map(SamzaSqlQueryParser.QueryInfo::getSink).collect(Collectors.toList()));
 
-    StreamApplicationDescriptorImpl appDesc = new StreamApplicationDescriptorImpl(streamApp -> { },samzaConfig);
+    StreamApplicationDescriptorImpl appDesc = new StreamApplicationDescriptorImpl(streamApp -> { }, samzaConfig);
     QueryTranslator translator = new QueryTranslator(appDesc, samzaSqlApplicationConfig);
 
     translator.translate(queryInfo.get(0), appDesc, 0);
@@ -204,7 +204,7 @@
             .collect(Collectors.toList()),
         queryInfo.stream().map(SamzaSqlQueryParser.QueryInfo::getSink).collect(Collectors.toList()));
 
-    StreamApplicationDescriptorImpl appDesc = new StreamApplicationDescriptorImpl(streamApp -> { },samzaConfig);
+    StreamApplicationDescriptorImpl appDesc = new StreamApplicationDescriptorImpl(streamApp -> { }, samzaConfig);
     QueryTranslator translator = new QueryTranslator(appDesc, samzaSqlApplicationConfig);
 
     translator.translate(queryInfo.get(0), appDesc, 0);
@@ -313,30 +313,6 @@
   }
 
   @Test (expected = SamzaException.class)
-  public void testTranslateStreamTableJoinWithoutJoinOperator() {
-    Map<String, String> config = SamzaSqlTestConfig.fetchStaticConfigsWithFactories(configs, 1);
-    String sql =
-        "Insert into testavro.enrichedPageViewTopic(profileName, pageKey)"
-            + " select p.name as profileName, pv.pageKey"
-            + " from testavro.PAGEVIEW as pv, testavro.PROFILE.`$table` as p"
-            + " where p.id = pv.profileId";
-    config.put(SamzaSqlApplicationConfig.CFG_SQL_STMT, sql);
-    Config samzaConfig = SamzaSqlApplicationRunner.computeSamzaConfigs(true, new MapConfig(config));
-
-    List<String> sqlStmts = fetchSqlFromConfig(config);
-    List<SamzaSqlQueryParser.QueryInfo> queryInfo = fetchQueryInfo(sqlStmts);
-    SamzaSqlApplicationConfig samzaSqlApplicationConfig = new SamzaSqlApplicationConfig(new MapConfig(config),
-        queryInfo.stream().map(SamzaSqlQueryParser.QueryInfo::getSources).flatMap(Collection::stream)
-            .collect(Collectors.toList()),
-        queryInfo.stream().map(SamzaSqlQueryParser.QueryInfo::getSink).collect(Collectors.toList()));
-
-    StreamApplicationDescriptorImpl streamAppDesc = new StreamApplicationDescriptorImpl(streamApp -> { }, samzaConfig);
-    QueryTranslator translator = new QueryTranslator(streamAppDesc, samzaSqlApplicationConfig);
-
-    translator.translate(queryInfo.get(0), streamAppDesc, 0);
-  }
-
-  @Test (expected = SamzaException.class)
   public void testTranslateStreamTableJoinWithFullJoinOperator() {
     Map<String, String> config = SamzaSqlTestConfig.fetchStaticConfigsWithFactories(configs, 1);
     String sql =
@@ -656,9 +632,9 @@
 
     Assert.assertEquals(3, specGraph.getOutputStreams().size());
     Assert.assertEquals("kafka", output1System);
-    Assert.assertEquals("sql-job-1-partition_by-sampleAppv1_table_sql_0_join_2", output1PhysicalName);
+    Assert.assertEquals("sql-job-1-partition_by-sampleAppv1_table_sql_0_join_3", output1PhysicalName);
     Assert.assertEquals("kafka", output2System);
-    Assert.assertEquals("sql-job-1-partition_by-sampleAppv1_stream_sql_0_join_2", output2PhysicalName);
+    Assert.assertEquals("sql-job-1-partition_by-sampleAppv1_stream_sql_0_join_3", output2PhysicalName);
     Assert.assertEquals("testavro", output3System);
     Assert.assertEquals("enrichedPageViewTopic", output3PhysicalName);
 
@@ -668,9 +644,9 @@
     Assert.assertEquals("testavro", input2System);
     Assert.assertEquals("PROFILE", input2PhysicalName);
     Assert.assertEquals("kafka", input3System);
-    Assert.assertEquals("sql-job-1-partition_by-sampleAppv1_table_sql_0_join_2", input3PhysicalName);
+    Assert.assertEquals("sql-job-1-partition_by-sampleAppv1_table_sql_0_join_3", input3PhysicalName);
     Assert.assertEquals("kafka", input4System);
-    Assert.assertEquals("sql-job-1-partition_by-sampleAppv1_stream_sql_0_join_2", input4PhysicalName);
+    Assert.assertEquals("sql-job-1-partition_by-sampleAppv1_stream_sql_0_join_3", input4PhysicalName);
   }
 
   @Test
@@ -724,9 +700,9 @@
 
     Assert.assertEquals(3, specGraph.getOutputStreams().size());
     Assert.assertEquals("kafka", output1System);
-    Assert.assertEquals("sql-job-1-partition_by-table_sql_0_join_2", output1PhysicalName);
+    Assert.assertEquals("sql-job-1-partition_by-table_sql_0_join_3", output1PhysicalName);
     Assert.assertEquals("kafka", output2System);
-    Assert.assertEquals("sql-job-1-partition_by-stream_sql_0_join_2", output2PhysicalName);
+    Assert.assertEquals("sql-job-1-partition_by-stream_sql_0_join_3", output2PhysicalName);
     Assert.assertEquals("testavro", output3System);
     Assert.assertEquals("enrichedPageViewTopic", output3PhysicalName);
 
@@ -736,9 +712,9 @@
     Assert.assertEquals("testavro", input2System);
     Assert.assertEquals("PROFILE", input2PhysicalName);
     Assert.assertEquals("kafka", input3System);
-    Assert.assertEquals("sql-job-1-partition_by-table_sql_0_join_2", input3PhysicalName);
+    Assert.assertEquals("sql-job-1-partition_by-table_sql_0_join_3", input3PhysicalName);
     Assert.assertEquals("kafka", input4System);
-    Assert.assertEquals("sql-job-1-partition_by-stream_sql_0_join_2", input4PhysicalName);
+    Assert.assertEquals("sql-job-1-partition_by-stream_sql_0_join_3", input4PhysicalName);
   }
 
   @Test
@@ -791,9 +767,9 @@
 
     Assert.assertEquals(3, specGraph.getOutputStreams().size());
     Assert.assertEquals("kafka", output1System);
-    Assert.assertEquals("sql-job-1-partition_by-table_sql_0_join_2", output1PhysicalName);
+    Assert.assertEquals("sql-job-1-partition_by-table_sql_0_join_3", output1PhysicalName);
     Assert.assertEquals("kafka", output2System);
-    Assert.assertEquals("sql-job-1-partition_by-stream_sql_0_join_2", output2PhysicalName);
+    Assert.assertEquals("sql-job-1-partition_by-stream_sql_0_join_3", output2PhysicalName);
     Assert.assertEquals("testavro", output3System);
     Assert.assertEquals("enrichedPageViewTopic", output3PhysicalName);
 
@@ -803,9 +779,9 @@
     Assert.assertEquals("testavro", input2System);
     Assert.assertEquals("PAGEVIEW", input2PhysicalName);
     Assert.assertEquals("kafka", input3System);
-    Assert.assertEquals("sql-job-1-partition_by-table_sql_0_join_2", input3PhysicalName);
+    Assert.assertEquals("sql-job-1-partition_by-table_sql_0_join_3", input3PhysicalName);
     Assert.assertEquals("kafka", input4System);
-    Assert.assertEquals("sql-job-1-partition_by-stream_sql_0_join_2", input4PhysicalName);
+    Assert.assertEquals("sql-job-1-partition_by-stream_sql_0_join_3", input4PhysicalName);
   }
 
   @Test
diff --git a/samza-sql/src/test/java/org/apache/samza/sql/translator/TestSamzaSqlLocalTableJoinFunction.java b/samza-sql/src/test/java/org/apache/samza/sql/translator/TestSamzaSqlLocalTableJoinFunction.java
index de395fa..c3cb2bb 100644
--- a/samza-sql/src/test/java/org/apache/samza/sql/translator/TestSamzaSqlLocalTableJoinFunction.java
+++ b/samza-sql/src/test/java/org/apache/samza/sql/translator/TestSamzaSqlLocalTableJoinFunction.java
@@ -158,7 +158,7 @@
     List<String> expectedFieldNames = new ArrayList<>(streamFieldNames);
     expectedFieldNames.addAll(tableFieldNames);
     List<Object> expectedFieldValues = new ArrayList<>(streamFieldValues);
-    expectedFieldValues.addAll(tableFieldNames.stream().map( name -> null ).collect(Collectors.toList()));
+    expectedFieldValues.addAll(tableFieldNames.stream().map(name -> null).collect(Collectors.toList()));
     Assert.assertEquals(outMsg.getSamzaSqlRelRecord().getFieldValues(), expectedFieldValues);
   }
 }
\ No newline at end of file
diff --git a/samza-sql/src/test/java/org/apache/samza/sql/udf/impl/TestReflectionBasedUdfResolver.java b/samza-sql/src/test/java/org/apache/samza/sql/udf/impl/TestReflectionBasedUdfResolver.java
index d4c96af..af2259c 100644
--- a/samza-sql/src/test/java/org/apache/samza/sql/udf/impl/TestReflectionBasedUdfResolver.java
+++ b/samza-sql/src/test/java/org/apache/samza/sql/udf/impl/TestReflectionBasedUdfResolver.java
@@ -50,7 +50,7 @@
     Collection<UdfMetadata> udfMetadataList = reflectionBasedUdfResolver.getUdfs();
 
     Method method = TestSamzaSqlUdf.class.getMethod("execute", String.class);
-    UdfMetadata udfMetadata = new UdfMetadata("TESTSAMZASQLUDF",
+    UdfMetadata udfMetadata = new UdfMetadata("TestSamzaSqlUdf",
             "Test samza sql udf implementation", method, new MapConfig(), ImmutableList.of(SamzaSqlFieldType.STRING),
                SamzaSqlFieldType.STRING, true);
 
diff --git a/samza-sql/src/test/java/org/apache/samza/sql/util/RemoteStoreIOResolverTestFactory.java b/samza-sql/src/test/java/org/apache/samza/sql/util/RemoteStoreIOResolverTestFactory.java
index 4a1d299..c840f3d 100644
--- a/samza-sql/src/test/java/org/apache/samza/sql/util/RemoteStoreIOResolverTestFactory.java
+++ b/samza-sql/src/test/java/org/apache/samza/sql/util/RemoteStoreIOResolverTestFactory.java
@@ -88,7 +88,7 @@
   }
 
   private class TestRemoteStoreIOResolver implements SqlIOResolver {
-    private final String SAMZA_SQL_QUERY_TABLE_KEYWORD = "$table";
+    private static final String SAMZA_SQL_QUERY_TABLE_KEYWORD = "$table";
     private final Config config;
     private final Map<String, TableDescriptor> tableDescMap = new HashMap<>();
     private final String changeLogStorePrefix;
diff --git a/samza-sql/src/test/java/org/apache/samza/sql/util/SampleRelTableKeyConverter.java b/samza-sql/src/test/java/org/apache/samza/sql/util/SampleRelTableKeyConverter.java
index 1589995..38cb21f 100644
--- a/samza-sql/src/test/java/org/apache/samza/sql/util/SampleRelTableKeyConverter.java
+++ b/samza-sql/src/test/java/org/apache/samza/sql/util/SampleRelTableKeyConverter.java
@@ -34,6 +34,10 @@
     if (relRecord.getFieldValues().get(0) instanceof SamzaSqlRelRecord) {
       relRecord = (SamzaSqlRelRecord) relRecord.getFieldValues().get(0);
     }
-    return relRecord.getFieldValues().stream().map(Object::toString).collect(Collectors.toList()).get(0);
+    return relRecord.getFieldValues()
+        .stream()
+        .map(x -> x == null ? null : x.toString())
+        .collect(Collectors.toList())
+        .get(0);
   }
 }
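Note: the converter change above replaces the Object::toString mapping with a null-safe lambda; the new SampleRelTableKeyConverterTest below exercises exactly that case. A tiny stand-alone illustration of the difference (names are illustrative):

    import java.util.Arrays;
    import java.util.List;
    import java.util.stream.Collectors;

    final class NullSafeToStringSketch {
      public static void main(String[] args) {
        List<Object> values = Arrays.<Object>asList(null, 42);
        // Mapping with Object::toString would throw a NullPointerException on the null element;
        // the explicit null check keeps null keys intact, matching the converter change above.
        List<String> converted = values.stream()
            .map(x -> x == null ? null : x.toString())
            .collect(Collectors.toList());
        System.out.println(converted); // prints [null, 42]
      }
    }
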
diff --git a/samza-sql/src/test/java/org/apache/samza/sql/util/SampleRelTableKeyConverterTest.java b/samza-sql/src/test/java/org/apache/samza/sql/util/SampleRelTableKeyConverterTest.java
new file mode 100644
index 0000000..4d15e2b
--- /dev/null
+++ b/samza-sql/src/test/java/org/apache/samza/sql/util/SampleRelTableKeyConverterTest.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.samza.sql.util;
+
+import com.google.common.collect.ImmutableList;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.samza.sql.SamzaSqlRelRecord;
+import org.junit.Assert;
+import org.junit.Test;
+
+
+public class SampleRelTableKeyConverterTest {
+
+  @Test
+  public void testNullValue() {
+    SampleRelTableKeyConverter sampleRelTableKeyConverter = new SampleRelTableKeyConverter();
+    List<Object> values = new ArrayList<>();
+    values.add(null);
+    SamzaSqlRelRecord samzaSqlRelRecord = new SamzaSqlRelRecord(ImmutableList.of("c1"), values);
+    Object key = sampleRelTableKeyConverter.convertToTableKeyFormat(samzaSqlRelRecord);
+    Assert.assertNull(key);
+  }
+}
diff --git a/samza-sql/src/test/java/org/apache/samza/sql/util/SamzaSqlTestConfig.java b/samza-sql/src/test/java/org/apache/samza/sql/util/SamzaSqlTestConfig.java
index a2b713c..5a012a6 100644
--- a/samza-sql/src/test/java/org/apache/samza/sql/util/SamzaSqlTestConfig.java
+++ b/samza-sql/src/test/java/org/apache/samza/sql/util/SamzaSqlTestConfig.java
@@ -19,7 +19,6 @@
 
 package org.apache.samza.sql.util;
 
-import com.google.common.base.Joiner;
 import java.util.HashMap;
 import java.util.Map;
 import org.apache.samza.config.JobConfig;
@@ -35,10 +34,6 @@
 import org.apache.samza.sql.avro.schemas.PageViewCount;
 import org.apache.samza.sql.avro.schemas.Profile;
 import org.apache.samza.sql.avro.schemas.SimpleRecord;
-import org.apache.samza.sql.fn.BuildOutputRecordUdf;
-import org.apache.samza.sql.fn.FlattenUdf;
-import org.apache.samza.sql.fn.GetNestedFieldUdf;
-import org.apache.samza.sql.fn.RegexMatchUdf;
 import org.apache.samza.sql.impl.ConfigBasedIOResolverFactory;
 import org.apache.samza.sql.interfaces.SqlIOConfig;
 import org.apache.samza.sql.runner.SamzaSqlApplicationConfig;
diff --git a/samza-test/src/main/config/negate-number.properties b/samza-test/src/main/config/negate-number.properties
index 01b7f7e..1829008 100644
--- a/samza-test/src/main/config/negate-number.properties
+++ b/samza-test/src/main/config/negate-number.properties
@@ -22,6 +22,10 @@
 # YARN
 yarn.container.count=1
 
+# Config Loader
+job.config.loader.factory=org.apache.samza.config.loaders.PropertiesConfigLoaderFactory
+job.config.loader.properties.path=./__package/config/negate-number.properties
+
 # Task
 task.class=org.apache.samza.test.integration.NegateNumberTask
 task.inputs=kafka.samza-test-topic
diff --git a/samza-test/src/main/config/perf/container-performance.properties b/samza-test/src/main/config/perf/container-performance.properties
index 7dcab02..1f6656b 100644
--- a/samza-test/src/main/config/perf/container-performance.properties
+++ b/samza-test/src/main/config/perf/container-performance.properties
@@ -23,6 +23,10 @@
 yarn.container.count=1
 yarn.container.memory.mb=4096
 
+# Config Loader
+job.config.loader.factory=org.apache.samza.config.loaders.PropertiesConfigLoaderFactory
+job.config.loader.properties.path=./__package/config/perf/container-performance.properties
+
 # Task
 task.opts=-Xmx3072m -XX:+UseConcMarkSweepGC
 task.class=org.apache.samza.test.performance.TestPerformanceTask
diff --git a/samza-test/src/main/config/perf/kafka-read-write-performance.properties b/samza-test/src/main/config/perf/kafka-read-write-performance.properties
index f5ca601..9bf7263 100644
--- a/samza-test/src/main/config/perf/kafka-read-write-performance.properties
+++ b/samza-test/src/main/config/perf/kafka-read-write-performance.properties
@@ -22,6 +22,10 @@
 # YARN
 yarn.container.count=1
 
+# Config Loader
+job.config.loader.factory=org.apache.samza.config.loaders.PropertiesConfigLoaderFactory
+job.config.loader.properties.path=./__package/config/perf/kafka-read-write-performance.properties
+
 # Task
 task.class=org.apache.samza.test.performance.TestPerformanceTask
 task.inputs=kafka.kafka-read-write-performance-input
diff --git a/samza-test/src/main/java/org/apache/samza/example/AsyncApplicationExample.java b/samza-test/src/main/java/org/apache/samza/example/AsyncApplicationExample.java
index 9ec1dca..349bb20 100644
--- a/samza-test/src/main/java/org/apache/samza/example/AsyncApplicationExample.java
+++ b/samza-test/src/main/java/org/apache/samza/example/AsyncApplicationExample.java
@@ -108,18 +108,18 @@
 
     static CompletionStage<Member> decorateMember(int memberId) {
       return CompletableFuture.supplyAsync(() -> {
-          /*
-           * Introduce some lag to mimic remote call. In real use cases, this typically translates to over the wire
-           * network call to some rest service.
-           */
-          try {
-            Thread.sleep((long) (Math.random() * 10000));
-          } catch (InterruptedException ec) {
-            System.out.println("Interrupted during sleep");
-          }
+        /*
+         * Introduce some lag to mimic a remote call. In real use cases, this typically translates to an
+         * over-the-wire network call to some REST service.
+         */
+        try {
+          Thread.sleep((long) (Math.random() * 10000));
+        } catch (InterruptedException ec) {
+          System.out.println("Interrupted during sleep");
+        }
 
-          return new Member(memberId, getRandomGender(), getRandomCountry());
-        });
+        return new Member(memberId, getRandomGender(), getRandomCountry());
+      });
     }
 
     static String getRandomGender() {
diff --git a/samza-test/src/main/java/org/apache/samza/test/framework/TestRunner.java b/samza-test/src/main/java/org/apache/samza/test/framework/TestRunner.java
index bca168e..fb65eea 100644
--- a/samza-test/src/main/java/org/apache/samza/test/framework/TestRunner.java
+++ b/samza-test/src/main/java/org/apache/samza/test/framework/TestRunner.java
@@ -29,7 +29,7 @@
 import java.util.Map;
 import java.util.Set;
 import java.util.stream.Collectors;
-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.samza.SamzaException;
 import org.apache.samza.application.LegacyTaskApplication;
 import org.apache.samza.application.SamzaApplication;
@@ -320,10 +320,10 @@
     SystemConsumer consumer = factory.getConsumer(systemName, config, null);
     String name = (String) outputDescriptor.getPhysicalName().orElse(streamId);
     metadata.get(name).getSystemStreamPartitionMetadata().keySet().forEach(partition -> {
-        SystemStreamPartition temp = new SystemStreamPartition(systemName, streamId, partition);
-        ssps.add(temp);
-        consumer.register(temp, "0");
-      });
+      SystemStreamPartition temp = new SystemStreamPartition(systemName, streamId, partition);
+      ssps.add(temp);
+      consumer.register(temp, "0");
+    });
 
     long t = System.currentTimeMillis();
     Map<SystemStreamPartition, List<IncomingMessageEnvelope>> output = new HashMap<>();
@@ -361,7 +361,7 @@
     return output.entrySet()
         .stream()
         .collect(Collectors.toMap(entry -> entry.getKey().getPartition().getPartitionId(),
-            entry -> entry.getValue().stream().map(e -> (StreamMessageType) e.getMessage()).collect(Collectors.toList())));
+          entry -> entry.getValue().stream().map(e -> (StreamMessageType) e.getMessage()).collect(Collectors.toList())));
   }
 
   /**
@@ -395,18 +395,18 @@
     InMemorySystemProducer producer = (InMemorySystemProducer) factory.getProducer(systemName, config, null);
     SystemStream sysStream = new SystemStream(systemName, streamName);
     partitionData.forEach((partitionId, partition) -> {
-        partition.forEach(e -> {
-            Object key = e instanceof KV ? ((KV) e).getKey() : null;
-            Object value = e instanceof KV ? ((KV) e).getValue() : e;
-            if (value instanceof IncomingMessageEnvelope) {
-              producer.send((IncomingMessageEnvelope) value);
-            } else {
-              producer.send(systemName, new OutgoingMessageEnvelope(sysStream, Integer.valueOf(partitionId), key, value));
-            }
-          });
-        producer.send(systemName, new OutgoingMessageEnvelope(sysStream, Integer.valueOf(partitionId), null,
-            new EndOfStreamMessage(null)));
+      partition.forEach(e -> {
+        Object key = e instanceof KV ? ((KV) e).getKey() : null;
+        Object value = e instanceof KV ? ((KV) e).getValue() : e;
+        if (value instanceof IncomingMessageEnvelope) {
+          producer.send((IncomingMessageEnvelope) value);
+        } else {
+          producer.send(systemName, new OutgoingMessageEnvelope(sysStream, Integer.valueOf(partitionId), key, value));
+        }
       });
+      producer.send(systemName, new OutgoingMessageEnvelope(sysStream, Integer.valueOf(partitionId), null,
+          new EndOfStreamMessage(null)));
+    });
   }
 
   private void deleteStoreDirectories() {
diff --git a/samza-test/src/main/python/configs/tests.json b/samza-test/src/main/python/configs/tests.json
index 7f3b6f0..1e54929 100644
--- a/samza-test/src/main/python/configs/tests.json
+++ b/samza-test/src/main/python/configs/tests.json
@@ -1,5 +1,5 @@
 {
   "samza_executable": "samza-test_2.11-1.5.0-SNAPSHOT.tgz",
   "samza_install_path": "deploy/smoke_tests",
-  "samza_config_factory": "org.apache.samza.config.factories.PropertiesConfigFactory"
+  "samza_config_loader_factory": "org.apache.samza.config.loaders.PropertiesConfigLoaderFactory"
 }
diff --git a/samza-test/src/main/python/deployment.py b/samza-test/src/main/python/deployment.py
index 7cd3cac..a4a4546 100644
--- a/samza-test/src/main/python/deployment.py
+++ b/samza-test/src/main/python/deployment.py
@@ -17,7 +17,6 @@
 
 import os
 import logging
-import shutil
 import urllib
 import zopkio.runtime as runtime
 import zopkio.adhoc_deployer as adhoc_deployer
@@ -76,7 +75,6 @@
 
   # Setup Samza job deployer.
   samza_job_deployer = SamzaJobYarnDeployer({
-    'config_factory': c('samza_config_factory'),
     'yarn_site_template': c('yarn_site_template'),
     'yarn_driver_configs': c('yarn_driver_configs'),
     'yarn_nm_hosts': c('yarn_nm_hosts').values(),
diff --git a/samza-test/src/main/python/samza_job_yarn_deployer.py b/samza-test/src/main/python/samza_job_yarn_deployer.py
index 405678f..e1dc92e 100644
--- a/samza-test/src/main/python/samza_job_yarn_deployer.py
+++ b/samza-test/src/main/python/samza_job_yarn_deployer.py
@@ -35,11 +35,11 @@
 class SamzaJobYarnDeployer(Deployer):
   def __init__(self, configs={}):
     """
-    Instantiates a Samza job deployer that uses run-job.sh and kill-yarn-job.sh 
+    Instantiates a Samza job deployer that uses run-app.sh and kill-yarn-job.sh
     to start and stop Samza jobs in a YARN grid.
 
     param: configs -- Map of config key/values pairs. These configs will be used
-    as a default whenever overrides are not provided in the methods (install, 
+    as a default whenever overrides are not provided in the methods (install,
     start, stop, etc) below.
     """
     logging.getLogger("paramiko").setLevel(logging.ERROR)
@@ -52,7 +52,7 @@
 
   def install(self, package_id, configs={}):
     """
-    Installs a package (tarball, or zip) on to a list of remote hosts by 
+    Installs a package (tarball or zip) onto a list of remote hosts by
     SFTP'ing the package to the remote install_path.
 
     param: package_id -- A unique ID used to identify an installed YARN package.
@@ -104,29 +104,27 @@
     with a package_id, and a config file.
     param: configs -- Map of config key/values pairs. Valid keys include:
 
-    package_id: The package_id for the package that contains the code for job_id. 
-    Usually, the package_id refers to the .tgz job tarball that contains the 
+    package_id: The package_id for the package that contains the code for job_id.
+    Usually, the package_id refers to the .tgz job tarball that contains the
     code necessary to run job_id.
-    config_factory: The config factory to use to decode the config_file.
     config_file: Path to the config file for the job to be run.
     install_path: Path where the package for the job has been installed on remote NMs.
-    properties: (optional) [(property-name,property-value)] Optional override 
-    properties for the run-job.sh script. These properties override the 
+    properties: (optional) [(property-name,property-value)] Optional override
+    properties for the run-job.sh script. These properties override the
     config_file's properties.
     """
     configs = self._get_merged_configs(configs)
-    self._validate_configs(configs, ['package_id', 'config_factory', 'config_file', 'install_path'])
+    self._validate_configs(configs, ['package_id', 'config_file', 'install_path'])
 
     # Get configs.
     package_id = configs.get('package_id')
-    config_factory = configs.get('config_factory')
     config_file = configs.get('config_file')
     install_path = configs.get('install_path')
     properties = configs.get('properties', {})
     properties['yarn.package.path'] = 'file:' + os.path.join(install_path, self._get_package_tgz_name(package_id))
 
     # Execute bin/run-job.sh locally from driver machine.
-    command = "{0} --config-factory={1} --config-path={2}".format(os.path.join(package_id, "bin/run-job.sh"), config_factory, os.path.join(package_id, config_file))
+    command = "{0} --config-path={1}".format(os.path.join(package_id, "bin/run-app.sh"), os.path.join(package_id, config_file))
     env = self._get_env_vars(package_id)
     for property_name, property_value in properties.iteritems():
       command += " --config {0}={1}".format(property_name, property_value)
@@ -176,7 +174,7 @@
 
   def await(self, job_id, configs={}):
     """
-    Waits for a Samza job to finish using bin/stat-yarn-job.sh. A job is 
+    Waits for a Samza job to finish using bin/stat-yarn-job.sh. A job is
     finished when its "Final State" is not "UNDEFINED".
 
     param: job_id -- A unique ID used to identify a Samza job.
diff --git a/samza-test/src/main/python/tests/smoke_tests.py b/samza-test/src/main/python/tests/smoke_tests.py
index 53d5fa9..a8fb9b1 100644
--- a/samza-test/src/main/python/tests/smoke_tests.py
+++ b/samza-test/src/main/python/tests/smoke_tests.py
@@ -32,7 +32,7 @@
 
 def test_samza_job():
   """
-  Runs a job that reads converts input strings to integers, negates the 
+  Runs a job that reads input strings, converts each to an integer, negates the
   integer, and outputs to a Kafka topic.
   """
   _load_data()
@@ -41,7 +41,7 @@
 
 def validate_samza_job():
   """
-  Validates that negate-number negated all messages, and sent the output to 
+  Validates that negate-number negated all messages, and sent the output to
   samza-test-topic-output.
   """
   logger.info('Running validate_samza_job')
diff --git a/samza-test/src/main/scala/org/apache/samza/test/performance/TestKeyValuePerformance.scala b/samza-test/src/main/scala/org/apache/samza/test/performance/TestKeyValuePerformance.scala
index 9eaed56..46f345d 100644
--- a/samza-test/src/main/scala/org/apache/samza/test/performance/TestKeyValuePerformance.scala
+++ b/samza-test/src/main/scala/org/apache/samza/test/performance/TestKeyValuePerformance.scala
@@ -28,7 +28,7 @@
 import com.google.common.base.Stopwatch
 import com.google.common.collect.ImmutableList
 import com.google.common.collect.ImmutableMap
-import org.apache.commons.lang.RandomStringUtils
+import org.apache.commons.lang3.RandomStringUtils
 import org.apache.samza.config.{Config, JobConfig, MapConfig, StorageConfig}
 import org.apache.samza.container.TaskName
 import org.apache.samza.context.ContainerContextImpl
diff --git a/samza-test/src/test/java/org/apache/samza/storage/kv/TestKeyValueSizeHistogramMetric.java b/samza-test/src/test/java/org/apache/samza/storage/kv/TestKeyValueSizeHistogramMetric.java
index f7545dc..2a579bf 100644
--- a/samza-test/src/test/java/org/apache/samza/storage/kv/TestKeyValueSizeHistogramMetric.java
+++ b/samza-test/src/test/java/org/apache/samza/storage/kv/TestKeyValueSizeHistogramMetric.java
@@ -105,26 +105,26 @@
     }
 
     metricsRegistry.getGroups().forEach(group -> metricsRegistry.getGroup(group.toString()).forEach((name, metric) -> {
-        if (names.contains(name)) {
-          metric.visit(new MetricsVisitor() {
-            @Override
-            public void counter(Counter counter) {
+      if (names.contains(name)) {
+        metric.visit(new MetricsVisitor() {
+          @Override
+          public void counter(Counter counter) {
 
-            }
+          }
 
-            @Override
-            public <T> void gauge(Gauge<T> gauge) {
-              Double num = (Double) gauge.getValue();
-              Assert.assertNotEquals(0D, (Double) gauge.getValue(), 0.0001);
-            }
+          @Override
+          public <T> void gauge(Gauge<T> gauge) {
+            Double num = (Double) gauge.getValue();
+            Assert.assertNotEquals(0D, (Double) gauge.getValue(), 0.0001);
+          }
 
-            @Override
-            public void timer(Timer timer) {
+          @Override
+          public void timer(Timer timer) {
 
-            }
-          });
-        }
-      }));
+          }
+        });
+      }
+    }));
   }
 
   private String getRandomString() {
diff --git a/samza-test/src/test/java/org/apache/samza/test/controlmessages/EndOfStreamIntegrationTest.java b/samza-test/src/test/java/org/apache/samza/test/controlmessages/EndOfStreamIntegrationTest.java
index 53bc234..b0630b2 100644
--- a/samza-test/src/test/java/org/apache/samza/test/controlmessages/EndOfStreamIntegrationTest.java
+++ b/samza-test/src/test/java/org/apache/samza/test/controlmessages/EndOfStreamIntegrationTest.java
@@ -105,8 +105,8 @@
             .map(KV::getValue)
             .partitionBy(pv -> pv.getMemberId(), pv -> pv, KVSerde.of(new NoOpSerde<>(), new NoOpSerde<>()), "p1")
             .sink((m, collector, coordinator) -> {
-                received.add(m.getValue());
-              });
+              received.add(m.getValue());
+            });
       }
     }
 
diff --git a/samza-test/src/test/java/org/apache/samza/test/controlmessages/WatermarkIntegrationTest.java b/samza-test/src/test/java/org/apache/samza/test/controlmessages/WatermarkIntegrationTest.java
index 4691e87..25d31ea 100644
--- a/samza-test/src/test/java/org/apache/samza/test/controlmessages/WatermarkIntegrationTest.java
+++ b/samza-test/src/test/java/org/apache/samza/test/controlmessages/WatermarkIntegrationTest.java
@@ -157,8 +157,8 @@
             .map(KV::getValue)
             .partitionBy(pv -> pv.getMemberId(), pv -> pv, KVSerde.of(new NoOpSerde<>(), new NoOpSerde<>()), "p1")
             .sink((m, collector, coordinator) -> {
-                received.add(m.getValue());
-              });
+              received.add(m.getValue());
+            });
       }
     }
 
diff --git a/samza-test/src/test/java/org/apache/samza/test/framework/StreamApplicationIntegrationTest.java b/samza-test/src/test/java/org/apache/samza/test/framework/StreamApplicationIntegrationTest.java
index 5e25817..6afc77c 100644
--- a/samza-test/src/test/java/org/apache/samza/test/framework/StreamApplicationIntegrationTest.java
+++ b/samza-test/src/test/java/org/apache/samza/test/framework/StreamApplicationIntegrationTest.java
@@ -167,8 +167,8 @@
           .map(m -> new KV(m.getValue().getMemberId(), m.getValue()))
           .sendTo(table)
           .sink((kv, collector, coordinator) -> {
-              LOG.info("Inserted Profile with Key: {} in profile-view-store", kv.getKey());
-            });
+            LOG.info("Inserted Profile with Key: {} in profile-view-store", kv.getKey());
+          });
 
       OutputStream<TestTableData.EnrichedPageView> outputStream = appDescriptor.getOutputStream(enrichedPageViewOSD);
       appDescriptor.getInputStream(pageViewISD)
@@ -177,8 +177,8 @@
           .sendTo(outputStream)
           .map(TestTableData.EnrichedPageView::getPageKey)
           .sink((joinPageKey, collector, coordinator) -> {
-              collector.send(new OutgoingMessageEnvelope(new SystemStream("test", "JoinPageKeys"), null, null, joinPageKey));
-            });
+            collector.send(new OutgoingMessageEnvelope(new SystemStream("test", "JoinPageKeys"), null, null, joinPageKey));
+          });
 
     }
   }
diff --git a/samza-test/src/test/java/org/apache/samza/test/harness/InMemoryIntegrationTestHarness.java b/samza-test/src/test/java/org/apache/samza/test/harness/InMemoryIntegrationTestHarness.java
index f120ac3..0c1f85f 100644
--- a/samza-test/src/test/java/org/apache/samza/test/harness/InMemoryIntegrationTestHarness.java
+++ b/samza-test/src/test/java/org/apache/samza/test/harness/InMemoryIntegrationTestHarness.java
@@ -21,7 +21,7 @@
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Optional;
-import org.apache.commons.lang.RandomStringUtils;
+import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.samza.config.Config;
 import org.apache.samza.config.InMemorySystemConfig;
 import org.apache.samza.config.JobConfig;
diff --git a/samza-test/src/test/java/org/apache/samza/test/operator/RepartitionJoinWindowApp.java b/samza-test/src/test/java/org/apache/samza/test/operator/RepartitionJoinWindowApp.java
index 24726f8..dbdacf1 100644
--- a/samza-test/src/test/java/org/apache/samza/test/operator/RepartitionJoinWindowApp.java
+++ b/samza-test/src/test/java/org/apache/samza/test/operator/RepartitionJoinWindowApp.java
@@ -94,11 +94,11 @@
             new StringSerde(), new JsonSerdeV2<>(UserPageAdClick.class)), "userAdClickWindow")
         .map(windowPane -> KV.of(windowPane.getKey().getKey(), String.valueOf(windowPane.getMessage().size())))
         .sink((message, messageCollector, taskCoordinator) -> {
-            taskCoordinator.commit(TaskCoordinator.RequestScope.ALL_TASKS_IN_CONTAINER);
-            messageCollector.send(
-                new OutgoingMessageEnvelope(
-                    new SystemStream("kafka", outputTopic), null, message.getKey(), message.getValue()));
-          });
+          taskCoordinator.commit(TaskCoordinator.RequestScope.ALL_TASKS_IN_CONTAINER);
+          messageCollector.send(
+              new OutgoingMessageEnvelope(
+                  new SystemStream("kafka", outputTopic), null, message.getKey(), message.getValue()));
+        });
 
 
     intermediateStreamIds.add(((IntermediateMessageStreamImpl) pageViewsRepartitionedByViewId).getStreamId());
diff --git a/samza-test/src/test/java/org/apache/samza/test/operator/TestAsyncFlatMap.java b/samza-test/src/test/java/org/apache/samza/test/operator/TestAsyncFlatMap.java
index 275be34..a60aea7 100644
--- a/samza-test/src/test/java/org/apache/samza/test/operator/TestAsyncFlatMap.java
+++ b/samza-test/src/test/java/org/apache/samza/test/operator/TestAsyncFlatMap.java
@@ -152,14 +152,14 @@
     private static CompletionStage<Collection<PageView>> filterGuestPageViews(PageView pageView,
         Predicate<PageView> shouldFailProcess, Supplier<Long> processJitter) {
       CompletableFuture<Collection<PageView>> filteredPageViews = CompletableFuture.supplyAsync(() -> {
-          try {
-            Thread.sleep(processJitter.get());
-          } catch (InterruptedException ex) {
-            System.out.println("Interrupted during sleep.");
-          }
+        try {
+          Thread.sleep(processJitter.get());
+        } catch (InterruptedException ex) {
+          System.out.println("Interrupted during sleep.");
+        }
 
-          return Long.valueOf(pageView.getUserId()) < 1 ? Collections.emptyList() : Collections.singleton(pageView);
-        });
+        return Long.valueOf(pageView.getUserId()) < 1 ? Collections.emptyList() : Collections.singleton(pageView);
+      });
 
       if (shouldFailProcess.test(pageView)) {
         filteredPageViews.completeExceptionally(new RuntimeException("Remote service threw an exception"));
diff --git a/samza-test/src/test/java/org/apache/samza/test/processor/TestStreamProcessor.java b/samza-test/src/test/java/org/apache/samza/test/processor/TestStreamProcessor.java
index 0c8cea4..1e5e24c 100644
--- a/samza-test/src/test/java/org/apache/samza/test/processor/TestStreamProcessor.java
+++ b/samza-test/src/test/java/org/apache/samza/test/processor/TestStreamProcessor.java
@@ -278,10 +278,10 @@
       doNothing().when(listener).afterStart();
       doNothing().when(listener).afterFailure(any());
       doAnswer(invocation -> {
-          // stopped successfully
-          shutdownLatch.countDown();
-          return null;
-        }).when(listener).afterStop();
+        // stopped successfully
+        shutdownLatch.countDown();
+        return null;
+      }).when(listener).afterStop();
     }
 
     private void initProducer(String bootstrapServer) {
diff --git a/samza-test/src/test/java/org/apache/samza/test/processor/TestZkLocalApplicationRunner.java b/samza-test/src/test/java/org/apache/samza/test/processor/TestZkLocalApplicationRunner.java
index a2a1e5c..fef4836 100644
--- a/samza-test/src/test/java/org/apache/samza/test/processor/TestZkLocalApplicationRunner.java
+++ b/samza-test/src/test/java/org/apache/samza/test/processor/TestZkLocalApplicationRunner.java
@@ -236,10 +236,10 @@
       config.put(ClusterManagerConfig.HOST_AFFINITY_ENABLED, "false");
     }
     storeName.ifPresent(s -> {
-        config.put(String.format(StorageConfig.FACTORY, s), MockStoreFactory.class.getName());
-        config.put(String.format(StorageConfig.KEY_SERDE, s), "string");
-        config.put(String.format(StorageConfig.MSG_SERDE, s), "string");
-      });
+      config.put(String.format(StorageConfig.FACTORY, s), MockStoreFactory.class.getName());
+      config.put(String.format(StorageConfig.KEY_SERDE, s), "string");
+      config.put(String.format(StorageConfig.MSG_SERDE, s), "string");
+    });
     Map<String, String> samzaContainerConfig = ImmutableMap.<String, String>builder().putAll(config).build();
     Map<String, String> applicationConfig = Maps.newHashMap(samzaContainerConfig);
     applicationConfig.putAll(StandaloneTestUtils.getKafkaSystemConfigs(coordinatorSystemName, bootstrapServers(), zkConnect(), null, StandaloneTestUtils.SerdeAlias.STRING, true));
@@ -274,11 +274,11 @@
     final CountDownLatch secondProcessorRegistered = new CountDownLatch(1);
 
     zkUtils.subscribeToProcessorChange((parentPath, currentChilds) -> {
-        // When appRunner2 with id: PROCESSOR_IDS[1] is registered, run processing message in appRunner1.
-        if (currentChilds.contains(PROCESSOR_IDS[1])) {
-          secondProcessorRegistered.countDown();
-        }
-      });
+      // When appRunner2 with id: PROCESSOR_IDS[1] is registered, run processing message in appRunner1.
+      if (currentChilds.contains(PROCESSOR_IDS[1])) {
+        secondProcessorRegistered.countDown();
+      }
+    });
 
     // Set up stream app appRunner2.
     CountDownLatch processedMessagesLatch = new CountDownLatch(NUM_KAFKA_EVENTS);
@@ -356,11 +356,11 @@
     final CountDownLatch secondProcessorRegistered = new CountDownLatch(1);
 
     zkUtils.subscribeToProcessorChange((parentPath, currentChilds) -> {
-        // When appRunner2 with id: PROCESSOR_IDS[1] is registered, start processing message in appRunner1.
-        if (currentChilds.contains(PROCESSOR_IDS[1])) {
-          secondProcessorRegistered.countDown();
-        }
-      });
+      // When appRunner2 with id: PROCESSOR_IDS[1] is registered, start processing message in appRunner1.
+      if (currentChilds.contains(PROCESSOR_IDS[1])) {
+        secondProcessorRegistered.countDown();
+      }
+    });
 
     // Set up appRunner2.
     CountDownLatch processedMessagesLatch = new CountDownLatch(NUM_KAFKA_EVENTS * 2);
@@ -755,10 +755,10 @@
     Map<String, String> configMap = new HashMap<>();
     CoordinatorStreamValueSerde jsonSerde = new CoordinatorStreamValueSerde("set-config");
     metadataStore.all().forEach((key, value) -> {
-        CoordinatorStreamStore.CoordinatorMessageKey coordinatorMessageKey = CoordinatorStreamStore.deserializeCoordinatorMessageKeyFromJson(key);
-        String deserializedValue = jsonSerde.fromBytes(value);
-        configMap.put(coordinatorMessageKey.getKey(), deserializedValue);
-      });
+      CoordinatorStreamStore.CoordinatorMessageKey coordinatorMessageKey = CoordinatorStreamStore.deserializeCoordinatorMessageKeyFromJson(key);
+      String deserializedValue = jsonSerde.fromBytes(value);
+      configMap.put(coordinatorMessageKey.getKey(), deserializedValue);
+    });
     return new MapConfig(configMap);
   }
 
@@ -1279,8 +1279,8 @@
   private static List<SystemStreamPartition> getSystemStreamPartitions(JobModel jobModel) {
     List<SystemStreamPartition> ssps = new ArrayList<>();
     jobModel.getContainers().forEach((containerName, containerModel) -> {
-        containerModel.getTasks().forEach((taskName, taskModel) -> ssps.addAll(taskModel.getSystemStreamPartitions()));
-      });
+      containerModel.getTasks().forEach((taskName, taskModel) -> ssps.addAll(taskModel.getSystemStreamPartitions()));
+    });
     return ssps;
   }
 
diff --git a/samza-test/src/test/java/org/apache/samza/test/samzasql/TestSamzaSqlEndToEnd.java b/samza-test/src/test/java/org/apache/samza/test/samzasql/TestSamzaSqlEndToEnd.java
index b6b8a96..ca78af2 100644
--- a/samza-test/src/test/java/org/apache/samza/test/samzasql/TestSamzaSqlEndToEnd.java
+++ b/samza-test/src/test/java/org/apache/samza/test/samzasql/TestSamzaSqlEndToEnd.java
@@ -499,7 +499,6 @@
     Assert.assertEquals(IntStream.range(0, numMessages).boxed().collect(Collectors.toList()), outMessages);
   }
 
-  @Ignore
   @Test
   public void testEndToEndNestedRecord() throws SamzaSqlValidatorException {
     int numMessages = 10;
@@ -507,9 +506,12 @@
     Map<String, String> staticConfigs = SamzaSqlTestConfig.fetchStaticConfigsWithFactories(numMessages);
 
     String sql1 =
-        "Insert into testavro.outputTopic"
-            + " select `phoneNumbers`[0].`kind`"
-            + " from testavro.PROFILE as p";
+        "Insert into testavro.outputTopic (id, bool_value)"
+            // SQL array is one indexed.
+            + " select `phoneNumbers`[1].`kind` as string_value, p.address.streetnum.number as id, "
+            + " `phoneNumbers`[1].`kind` = 'Home' as bool_value, cast(p.address.zip as bigint) as long_value"
+            + " from testavro.PROFILE as p where p.address.zip > 0 and p.address.zip < 100003 ";
+
     List<String> sqlStmts = Collections.singletonList(sql1);
     staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMTS_JSON, JsonUtil.toJson(sqlStmts));
 
@@ -523,6 +525,72 @@
     Assert.assertEquals(numMessages, outMessages.size());
   }
 
+  /**
+   * Tests the GetNestedField built-in operator.
+   * @throws SamzaSqlValidatorException if the SQL statements fail validation
+   */
+  @Test
+  public void testEndToEndGetNestedFieldOperator() throws SamzaSqlValidatorException {
+    int numMessages = 10;
+    TestAvroSystemFactory.messages.clear();
+    Map<String, String> staticConfigs = SamzaSqlTestConfig.fetchStaticConfigsWithFactories(numMessages);
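+    // GetNestedField is exercised on nested records (address), map values (mapValues['key']) and
+    // array elements (phoneNumbers[1]), in both the projection and the WHERE clause.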
+    String sql1 =
+        "Insert into testavro.outputTopic (string_value, id, bool_value, double_value, map_values, long_value)"
+            + " select GetNestedField(address, 'streetnum.number') * getNestedField(mapValues['key'], 'id') as id, "
+            + " cast(GetNestedField(address, 'streetnum').number * 1.0 as double) as double_value, mapValues as map_values, "
+            + " GetNestedField(phoneNumbers[1] ,'kind') = 'Home' as bool_value, cast( mapValues['key'].id as bigint) as long_value , "
+            + " GetNestedField(mapValues['key'], 'name') as string_value "
+            + " from testavro.PROFILE as p  where GetNestedField(address, 'zip') > 0 and GetNestedField(address, 'zip') < 100003";
+    List<String> sqlStmts = Collections.singletonList(sql1);
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMTS_JSON, JsonUtil.toJson(sqlStmts));
+
+    Config config = new MapConfig(staticConfigs);
+    new SamzaSqlValidator(config).validate(sqlStmts);
+    runApplication(config);
+
+    List<OutgoingMessageEnvelope> outMessages = new ArrayList<>(TestAvroSystemFactory.messages);
+    // check that the projected values are not null, correct types and good values when easy to check.
+    List<GenericRecord> actualResult = outMessages.stream()
+        .map(x -> (GenericRecord) x.getMessage())
+        .filter(x -> (Boolean) x.get("bool_value"))
+        .filter(x -> x.get("string_value") != null && !x.get("string_value").toString().isEmpty())
+        .filter(x -> x.get("map_values") instanceof Map)
+        .filter(x -> x.get("id") instanceof Integer)
+        .filter(x -> (Long) x.get("long_value") < 10 && (Long) x.get("long_value") >= 0)
+        .filter(x -> x.get("double_value") instanceof Double && (Double) x.get("double_value") >= 1234.0)
+        .collect(Collectors.toList());
+    Assert.assertEquals(
+        "Wrong results size, check the test condition against the Actual outputs -> " + outMessages.toString(),
+        numMessages, actualResult.size());
+  }
+
+  @Test
+  public void testEndToEndNestedRecordProjectFilter() throws SamzaSqlValidatorException {
+    int numMessages = 10;
+    TestAvroSystemFactory.messages.clear();
+    Map<String, String> staticConfigs = SamzaSqlTestConfig.fetchStaticConfigsWithFactories(numMessages);
+
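+    // Projects nested record fields, a MAP constructor and string concatenation,
+    // while filtering on nested address fields.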
+    String sql1 = " Insert into testavro.PROFILE1 select (p.address.streetnum.number * p.address.zip) as id , "
+        + " p.address, `phoneNumbers`[1].`kind` = 'Home' as selfEmployed, "
+        + " MAP[cast(id as varchar), `phoneNumbers`[1].number] as mapValues, phoneNumbers, "
+        + " cast(companyId as varchar) || name ||`phoneNumbers`[1].number || 'concat' as name , "
+        + " 100 * ((companyId + 122) / 3 ) as companyId "
+        + " from testavro.PROFILE as p where p.address.zip > 0 "
+        + " and p.address.zip < 100003 and p.address.streetnum.number > 0 ";
+
+    List<String> sqlStmts = Collections.singletonList(sql1);
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMTS_JSON, JsonUtil.toJson(sqlStmts));
+
+    Config config = new MapConfig(staticConfigs);
+    new SamzaSqlValidator(config).validate(sqlStmts);
+
+    runApplication(config);
+
+    List<OutgoingMessageEnvelope> outMessages = new ArrayList<>(TestAvroSystemFactory.messages);
+    Assert.assertEquals(numMessages, outMessages.size());
+  }
+
   @Test
   public void testEndToEndFlattenWithUdf() throws Exception {
     int numMessages = 20;
@@ -647,7 +715,7 @@
     TestAvroSystemFactory.messages.clear();
     Map<String, String> staticConfigs = SamzaSqlTestConfig.fetchStaticConfigsWithFactories(numMessages);
     String sql1 = "Insert into testavro.PROFILE1(id, address) "
-        + "select id, BuildOutputRecord('key', GetNestedField(address, 'zip')) as address from testavro.PROFILE";
+        + "select id, BuildOutputRecord('key', p.address.zip) as address from testavro.PROFILE as p";
     List<String> sqlStmts = Collections.singletonList(sql1);
     staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMTS_JSON, JsonUtil.toJson(sqlStmts));
     runApplication(new MapConfig(staticConfigs));
@@ -724,7 +792,7 @@
             + "       p.name as profileName, p.address as profileAddress "
             + "from testavro.PROFILE.`$table` as p "
             + "join testavro.PAGEVIEW as pv "
-            + " on p.id = pv.profileId";
+            + " on p.id = pv.profileId where p.name = 'Mike' or p.name is not null";
 
     List<String> sqlStmts = Arrays.asList(sql);
     staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMTS_JSON, JsonUtil.toJson(sqlStmts));
@@ -745,7 +813,7 @@
   }
 
   @Test
-  public void testEndToEndStreamTableInnerJoinWithPrimaryKey() throws Exception {
+  public void testEndToEndStreamTableInnerJoinWithPrimaryKey() {
     int numMessages = 20;
 
     TestAvroSystemFactory.messages.clear();
@@ -864,13 +932,13 @@
 
     List<String> outMessages = TestAvroSystemFactory.messages.stream()
         .map(x -> {
-            GenericRecord profileAddr = (GenericRecord) ((GenericRecord) x.getMessage()).get("profileAddress");
-            GenericRecord streetNum = (GenericRecord) (profileAddr.get("streetnum"));
-            return ((GenericRecord) x.getMessage()).get("pageKey").toString() + ","
-                + (((GenericRecord) x.getMessage()).get("profileName") == null ? "null" :
-                ((GenericRecord) x.getMessage()).get("profileName").toString()) + ","
-                + profileAddr.get("zip") + "," + streetNum.get("number");
-          })
+          GenericRecord profileAddr = (GenericRecord) ((GenericRecord) x.getMessage()).get("profileAddress");
+          GenericRecord streetNum = (GenericRecord) (profileAddr.get("streetnum"));
+          return ((GenericRecord) x.getMessage()).get("pageKey").toString() + ","
+              + (((GenericRecord) x.getMessage()).get("profileName") == null ? "null" :
+              ((GenericRecord) x.getMessage()).get("profileName").toString()) + ","
+              + profileAddr.get("zip") + "," + streetNum.get("number");
+        })
         .collect(Collectors.toList());
     Assert.assertEquals(numMessages, outMessages.size());
     List<String> expectedOutMessages = TestAvroSystemFactory.getPageKeyProfileNameAddressJoin(numMessages);
@@ -1052,7 +1120,7 @@
   }
 
   @Test
-  public void testEndToEndStreamTableTableJoinWithPrimaryKeys() throws Exception {
+  public void testEndToEndStreamTableNestedJoinWithPrimaryKeys() throws Exception {
     int numMessages = 20;
 
     TestAvroSystemFactory.messages.clear();
@@ -1086,7 +1154,42 @@
   }
 
   @Test
-  public void testEndToEndStreamTableTableJoinWithCompositeKey() throws Exception {
+  public void testEndToEndStreamTableNestedJoinWithSubQuery() throws Exception {
+    int numMessages = 20;
+
+    TestAvroSystemFactory.messages.clear();
+    Map<String, String> staticConfigs = SamzaSqlTestConfig.fetchStaticConfigsWithFactories(numMessages);
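+    // The inner sub-query joins PAGEVIEW with the PROFILE table; its result is then joined with the COMPANY table.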
+    String sql =
+      "Insert into testavro.enrichedPageViewTopic "
+            + "select t.pageKey as __key__, t.pageKey as pageKey, c.name as companyName, t.profileName as profileName,"
+            + "       address as profileAddress "
+            + "from (select p.companyId as companyId, p.name as profileName, p.address as address, pv.pageKey as pageKey"
+            + "      from testavro.PAGEVIEW as pv "
+            + "      join testavro.PROFILE.`$table` as p "
+            + "      on MyTest(p.__key__) = MyTest(pv.profileId)) as t "
+            + "join testavro.COMPANY.`$table` as c "
+            + "on MyTest(t.companyId) = MyTest(c.__key__)";
+
+    List<String> sqlStmts = Arrays.asList(sql);
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMTS_JSON, JsonUtil.toJson(sqlStmts));
+
+    Config config = new MapConfig(staticConfigs);
+    new SamzaSqlValidator(config).validate(sqlStmts);
+
+    runApplication(config);
+
+    List<String> outMessages = TestAvroSystemFactory.messages.stream()
+        .map(x -> ((GenericRecord) x.getMessage()).get("pageKey").toString() + ","
+            + ((GenericRecord) x.getMessage()).get("profileName").toString() + ","
+            + ((GenericRecord) x.getMessage()).get("companyName").toString())
+        .collect(Collectors.toList());
+    Assert.assertEquals(numMessages, outMessages.size());
+    List<String> expectedOutMessages = TestAvroSystemFactory.getPageKeyProfileCompanyNameJoin(numMessages);
+    Assert.assertEquals(expectedOutMessages, outMessages);
+  }
+
+  @Test
+  public void testEndToEndStreamTableNestedJoinWithCompositeKey() throws Exception {
     int numMessages = 20;
 
     TestAvroSystemFactory.messages.clear();
@@ -1114,9 +1217,9 @@
             + ((GenericRecord) x.getMessage()).get("profileName").toString() + ","
             + ((GenericRecord) x.getMessage()).get("companyName").toString())
         .collect(Collectors.toList());
-    Assert.assertEquals(TestAvroSystemFactory.companies.length, outMessages.size());
+    Assert.assertEquals(TestAvroSystemFactory.COMPANIES.length, outMessages.size());
     List<String> expectedOutMessages =
-        TestAvroSystemFactory.getPageKeyProfileCompanyNameJoin(TestAvroSystemFactory.companies.length);
+        TestAvroSystemFactory.getPageKeyProfileCompanyNameJoin(TestAvroSystemFactory.COMPANIES.length);
     Assert.assertEquals(expectedOutMessages, outMessages);
   }
 
@@ -1150,19 +1253,19 @@
     HashMap<String, List<String>> pageKeyCountListMap = new HashMap<>();
     TestAvroSystemFactory.messages.stream()
         .map(x -> {
-            String pageKey = ((GenericRecord) x.getMessage()).get("pageKey").toString();
-            String count = ((GenericRecord) x.getMessage()).get("count").toString();
-            pageKeyCountListMap.computeIfAbsent(pageKey, k -> new ArrayList<>()).add(count);
-            return pageKeyCountListMap;
-          });
+          String pageKey = ((GenericRecord) x.getMessage()).get("pageKey").toString();
+          String count = ((GenericRecord) x.getMessage()).get("count").toString();
+          pageKeyCountListMap.computeIfAbsent(pageKey, k -> new ArrayList<>()).add(count);
+          return pageKeyCountListMap;
+        });
 
     HashMap<String, Integer> pageKeyCountMap = new HashMap<>();
     pageKeyCountListMap.forEach((key, list) -> {
-        // Check that the number of windows per key is non-zero but less than the number of input messages per key.
-        Assert.assertTrue(list.size() > 1 && list.size() < numMessages / TestAvroSystemFactory.pageKeys.length);
-        // Collapse the count of messages per key
-        pageKeyCountMap.put(key, list.stream().mapToInt(Integer::parseInt).sum());
-      });
+      // Check that the number of windows per key is non-zero but less than the number of input messages per key.
+      Assert.assertTrue(list.size() > 1 && list.size() < numMessages / TestAvroSystemFactory.PAGE_KEYS.length);
+      // Collapse the count of messages per key
+      pageKeyCountMap.put(key, list.stream().mapToInt(Integer::parseInt).sum());
+    });
 
     Set<String> pageKeys = new HashSet<>(Arrays.asList("job", "inbox"));
     HashMap<String, Integer> expectedPageKeyCountMap =
diff --git a/samza-test/src/test/java/org/apache/samza/test/samzasql/TestSamzaSqlRemoteTable.java b/samza-test/src/test/java/org/apache/samza/test/samzasql/TestSamzaSqlRemoteTable.java
index d985d80..bd541c6 100644
--- a/samza-test/src/test/java/org/apache/samza/test/samzasql/TestSamzaSqlRemoteTable.java
+++ b/samza-test/src/test/java/org/apache/samza/test/samzasql/TestSamzaSqlRemoteTable.java
@@ -25,6 +25,7 @@
 import java.util.Map;
 import java.util.stream.Collectors;
 import org.apache.avro.generic.GenericRecord;
+import org.apache.samza.SamzaException;
 import org.apache.samza.config.Config;
 import org.apache.samza.config.MapConfig;
 import org.apache.samza.sql.planner.SamzaSqlValidator;
@@ -104,7 +105,26 @@
   }
 
   @Test
-  public void testSourceEndToEndWithKey() throws SamzaSqlValidatorException {
+  public void testJoinEndToEnd() throws SamzaSqlValidatorException {
+    testJoinEndToEndHelper(false);
+  }
+
+  @Test
+  public void testJoinEndToEndWithUdf() throws SamzaSqlValidatorException {
+    testJoinEndToEndWithUdfHelper(false);
+  }
+
+  @Test
+  public void testJoinEndToEndWithOptimizer() throws SamzaSqlValidatorException {
+    testJoinEndToEndHelper(true);
+  }
+
+  @Test
+  public void testJoinEndToEndWithUdfAndOptimizer() throws SamzaSqlValidatorException {
+    testJoinEndToEndWithUdfHelper(true);
+  }
+
+  void testJoinEndToEndHelper(boolean enableOptimizer) throws SamzaSqlValidatorException {
     int numMessages = 20;
 
     TestAvroSystemFactory.messages.clear();
@@ -118,10 +138,11 @@
             + "       p.name as profileName, p.address as profileAddress "
             + "from testRemoteStore.Profile.`$table` as p "
             + "join testavro.PAGEVIEW as pv "
-            + " on p.__key__ = pv.profileId";
+            + " on p.__key__ = pv.profileId where p.name is not null";
 
     List<String> sqlStmts = Arrays.asList(sql);
     staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMTS_JSON, JsonUtil.toJson(sqlStmts));
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_ENABLE_PLAN_OPTIMIZER, Boolean.toString(enableOptimizer));
 
     Config config = new MapConfig(staticConfigs);
     new SamzaSqlValidator(config).validate(sqlStmts);
@@ -138,8 +159,7 @@
     Assert.assertEquals(expectedOutMessages, outMessages);
   }
 
-  @Test
-  public void testSourceEndToEndWithKeyAndUdf() throws SamzaSqlValidatorException {
+  void testJoinEndToEndWithUdfHelper(boolean enableOptimizer) throws SamzaSqlValidatorException {
     int numMessages = 20;
 
     TestAvroSystemFactory.messages.clear();
@@ -157,6 +177,7 @@
 
     List<String> sqlStmts = Arrays.asList(sql);
     staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMTS_JSON, JsonUtil.toJson(sqlStmts));
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_ENABLE_PLAN_OPTIMIZER, Boolean.toString(enableOptimizer));
 
     Config config = new MapConfig(staticConfigs);
     new SamzaSqlValidator(config).validate(sqlStmts);
@@ -174,6 +195,96 @@
   }
 
   @Test
+  public void testJoinEndToEndWithFilter() throws SamzaSqlValidatorException {
+    testJoinEndToEndWithFilterHelper(false);
+  }
+
+  @Test
+  public void testJoinEndToEndWithUdfAndFilter() throws SamzaSqlValidatorException {
+    testJoinEndToEndWithUdfAndFilterHelper(false);
+  }
+
+  @Test
+  public void testJoinEndToEndWithFilterAndOptimizer() throws SamzaSqlValidatorException {
+    testJoinEndToEndWithFilterHelper(true);
+  }
+
+  @Test
+  public void testJoinEndToEndWithUdfAndFilterAndOptimizer() throws SamzaSqlValidatorException {
+    testJoinEndToEndWithUdfAndFilterHelper(true);
+  }
+
+  void testJoinEndToEndWithFilterHelper(boolean enableOptimizer) throws SamzaSqlValidatorException {
+    int numMessages = 20;
+
+    TestAvroSystemFactory.messages.clear();
+    RemoteStoreIOResolverTestFactory.records.clear();
+    Map<String, String> staticConfigs = SamzaSqlTestConfig.fetchStaticConfigsWithFactories(numMessages);
+    populateProfileTable(staticConfigs, numMessages);
+
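+    // Remote-table join with a WHERE clause touching both join inputs; the flag below toggles
+    // CFG_SQL_ENABLE_PLAN_OPTIMIZER so the query runs with and without the plan optimizer.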
+    String sql =
+        "Insert into testavro.enrichedPageViewTopic "
+            + "select pv.pageKey as __key__, pv.pageKey as pageKey, coalesce(null, 'N/A') as companyName,"
+            + "       p.name as profileName, p.address as profileAddress "
+            + "from testRemoteStore.Profile.`$table` as p "
+            + "join testavro.PAGEVIEW as pv "
+            + " on p.__key__ = pv.profileId"
+            + " where p.name = 'Mike' and pv.profileId = 1";
+
+    List<String> sqlStmts = Arrays.asList(sql);
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMTS_JSON, JsonUtil.toJson(sqlStmts));
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_ENABLE_PLAN_OPTIMIZER, Boolean.toString(enableOptimizer));
+
+    Config config = new MapConfig(staticConfigs);
+    new SamzaSqlValidator(config).validate(sqlStmts);
+
+    runApplication(config);
+
+    List<String> outMessages = TestAvroSystemFactory.messages.stream()
+        .map(x -> ((GenericRecord) x.getMessage()).get("pageKey").toString() + ","
+            + (((GenericRecord) x.getMessage()).get("profileName") == null ? "null" :
+            ((GenericRecord) x.getMessage()).get("profileName").toString()))
+        .collect(Collectors.toList());
+    Assert.assertEquals(1, outMessages.size());
+    Assert.assertEquals("home,Mike", outMessages.get(0));
+  }
+
+  void testJoinEndToEndWithUdfAndFilterHelper(boolean enableOptimizer) throws SamzaSqlValidatorException {
+    int numMessages = 20;
+
+    TestAvroSystemFactory.messages.clear();
+    RemoteStoreIOResolverTestFactory.records.clear();
+    Map<String, String> staticConfigs = SamzaSqlTestConfig.fetchStaticConfigsWithFactories(numMessages);
+    populateProfileTable(staticConfigs, numMessages);
+
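+    // Same shape as testJoinEndToEndWithFilterHelper, but the stream-side join key is wrapped in the
+    // BuildOutputRecord UDF.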
+    String sql =
+        "Insert into testavro.enrichedPageViewTopic "
+            + "select pv.pageKey as __key__, pv.pageKey as pageKey, coalesce(null, 'N/A') as companyName,"
+            + "       p.name as profileName, p.address as profileAddress "
+            + "from testRemoteStore.Profile.`$table` as p "
+            + "join testavro.PAGEVIEW as pv "
+            + " on p.__key__ = BuildOutputRecord('id', pv.profileId)"
+            + " where p.name = 'Mike' and pv.profileId = 1";
+
+    List<String> sqlStmts = Arrays.asList(sql);
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMTS_JSON, JsonUtil.toJson(sqlStmts));
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_ENABLE_PLAN_OPTIMIZER, Boolean.toString(enableOptimizer));
+
+    Config config = new MapConfig(staticConfigs);
+    new SamzaSqlValidator(config).validate(sqlStmts);
+
+    runApplication(config);
+
+    List<String> outMessages = TestAvroSystemFactory.messages.stream()
+        .map(x -> ((GenericRecord) x.getMessage()).get("pageKey").toString() + ","
+            + (((GenericRecord) x.getMessage()).get("profileName") == null ? "null" :
+            ((GenericRecord) x.getMessage()).get("profileName").toString()))
+        .collect(Collectors.toList());
+    Assert.assertEquals(1, outMessages.size());
+    Assert.assertEquals("home,Mike", outMessages.get(0));
+  }
+
+  @Test
   public void testSourceEndToEndWithKeyWithNullForeignKeys() throws SamzaSqlValidatorException {
     int numMessages = 20;
 
@@ -225,7 +336,7 @@
             + "       p.name as profileName, p.address as profileAddress "
             + "from testRemoteStore.Profile.`$table` as p "
             + "right join testavro.PAGEVIEW as pv "
-            + " on p.__key__ = pv.profileId";
+            + " on p.__key__ = pv.profileId where p.name is null or  p.name <> '0'";
 
     List<String> sqlStmts = Arrays.asList(sql);
     staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMTS_JSON, JsonUtil.toJson(sqlStmts));
@@ -245,6 +356,125 @@
     Assert.assertEquals(expectedOutMessages, outMessages);
   }
 
+  @Test(expected = SamzaException.class)
+  public void testJoinConditionWithMoreThanOneConjunction() throws SamzaSqlValidatorException {
+    int numMessages = 20;
+    Map<String, String> staticConfigs =
+        SamzaSqlTestConfig.fetchStaticConfigsWithFactories(new HashMap<>(), numMessages, true);
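+    // The join condition below has two conjuncts; the test expects this to be rejected with a SamzaException.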
+    String sql =
+        "Insert into testavro.enrichedPageViewTopic "
+            + "select pv.pageKey as __key__, pv.pageKey as pageKey, coalesce(null, 'N/A') as companyName,"
+            + "       p.name as profileName, p.address as profileAddress "
+            + "from testRemoteStore.Profile.`$table` as p "
+            + "right join testavro.PAGEVIEW as pv "
+            + " on p.__key__ = pv.profileId and p.__key__ = pv.pageKey where p.name is null or  p.name <> '0'";
+
+    List<String> sqlStmts = Arrays.asList(sql);
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMTS_JSON, JsonUtil.toJson(sqlStmts));
+
+    Config config = new MapConfig(staticConfigs);
+    new SamzaSqlValidator(config).validate(sqlStmts);
+    runApplication(config);
+  }
+
+  @Test(expected = SamzaException.class)
+  public void testJoinConditionMissing__key__() throws SamzaSqlValidatorException {
+    int numMessages = 20;
+    Map<String, String> staticConfigs =
+        SamzaSqlTestConfig.fetchStaticConfigsWithFactories(new HashMap<>(), numMessages, true);
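+    // The join condition uses p.id rather than the table's __key__ field; the test expects a SamzaException.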
+    String sql =
+        "Insert into testavro.enrichedPageViewTopic "
+            + "select pv.pageKey as __key__, pv.pageKey as pageKey, coalesce(null, 'N/A') as companyName,"
+            + "       p.name as profileName, p.address as profileAddress "
+            + "from testRemoteStore.Profile.`$table` as p "
+            + "right join testavro.PAGEVIEW as pv "
+            + " on p.id = pv.profileId where p.name is null or  p.name <> '0'";
+
+    List<String> sqlStmts = Arrays.asList(sql);
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMTS_JSON, JsonUtil.toJson(sqlStmts));
+
+    Config config = new MapConfig(staticConfigs);
+    new SamzaSqlValidator(config).validate(sqlStmts);
+    runApplication(config);
+  }
+
+  @Test
+  public void testSourceEndToEndWithFilterAndInnerJoin() throws SamzaSqlValidatorException {
+    int numMessages = 20;
+    TestAvroSystemFactory.messages.clear();
+    RemoteStoreIOResolverTestFactory.records.clear();
+    Map<String, String> staticConfigs =
+        SamzaSqlTestConfig.fetchStaticConfigsWithFactories(new HashMap<>(), numMessages, true);
+    populateProfileTable(staticConfigs, numMessages);
+
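+    // Inner join against the remote Profile table, followed by a filter that keeps only rows
+    // whose profile name is not 'Mike'.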
+    String sql = "Insert into testavro.enrichedPageViewTopic "
+        + "select pv.pageKey as __key__, pv.pageKey as pageKey, coalesce(null, 'N/A') as companyName,"
+        + "       p.name as profileName, p.address as profileAddress "
+        + "from testavro.PAGEVIEW as pv  "
+        + "join testRemoteStore.Profile.`$table` as p "
+        + " on p.__key__ = pv.profileId"
+        + "  where p.name <> 'Mike' ";
+
+    List<String> sqlStmts = Arrays.asList(sql);
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMTS_JSON, JsonUtil.toJson(sqlStmts));
+
+    Config config = new MapConfig(staticConfigs);
+    new SamzaSqlValidator(config).validate(sqlStmts);
+
+    runApplication(config);
+
+    List<String> outMessages = TestAvroSystemFactory.messages.stream()
+        .map(x -> ((GenericRecord) x.getMessage()).get("pageKey").toString() + "," + (
+            ((GenericRecord) x.getMessage()).get("profileName") == null ? "null"
+                : ((GenericRecord) x.getMessage()).get("profileName").toString()))
+        .collect(Collectors.toList());
+    List<String> expectedOutMessages = TestAvroSystemFactory.getPageKeyProfileNameJoinWithNullForeignKeys(numMessages)
+        .stream()
+        .filter(x -> !x.contains("Mike"))
+        .collect(Collectors.toList());
+    Assert.assertEquals(expectedOutMessages, outMessages);
+  }
+
+  @Test
+  public void testSourceEndToEndWithFilterAndLeftOuterJoin() throws SamzaSqlValidatorException {
+    int numMessages = 20;
+    TestAvroSystemFactory.messages.clear();
+    RemoteStoreIOResolverTestFactory.records.clear();
+    Map<String, String> staticConfigs =
+        SamzaSqlTestConfig.fetchStaticConfigsWithFactories(new HashMap<>(), numMessages, true);
+    populateProfileTable(staticConfigs, numMessages);
+
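+    // LEFT OUTER join whose key is an arithmetic expression (pv.profileId + 1 - (2/2)); the filter
+    // also keeps rows with a null profile name.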
+    String sql = "Insert into testavro.enrichedPageViewTopic "
+        + "select pv.pageKey as __key__, pv.pageKey as pageKey, coalesce(null, 'N/A') as companyName,"
+        + "       p.name as profileName, p.address as profileAddress "
+        + "from testavro.PAGEVIEW as pv  "
+        + " LEFT Join testRemoteStore.Profile.`$table` as p "
+        + " on  pv.profileId + 1 - (2/2) = p.__key__ "
+        + "  where p.name <> 'Mary' or p.name is null";
+
+    List<String> sqlStmts = Arrays.asList(sql);
+    staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMTS_JSON, JsonUtil.toJson(sqlStmts));
+
+    Config config = new MapConfig(staticConfigs);
+    new SamzaSqlValidator(config).validate(sqlStmts);
+
+    runApplication(config);
+
+    List<String> outMessages = TestAvroSystemFactory.messages.stream()
+        .map(x -> ((GenericRecord) x.getMessage()).get("pageKey").toString() + "," + (
+            ((GenericRecord) x.getMessage()).get("profileName") == null ? "null"
+                : ((GenericRecord) x.getMessage()).get("profileName").toString()))
+        .collect(Collectors.toList());
+    List<String> expectedOutMessages =
+        TestAvroSystemFactory.getPageKeyProfileNameOuterJoinWithNullForeignKeys(numMessages)
+            .stream()
+            .filter(x -> !x.contains("Mary"))
+            .collect(Collectors.toList());
+
+    Assert.assertEquals(expectedOutMessages, outMessages);
+  }
+
   @Test
   public void testSameJoinTargetSinkEndToEndRightOuterJoin() throws SamzaSqlValidatorException {
     int numMessages = 21;
@@ -308,7 +538,7 @@
             + "select p.__key__ as __key__, 'UPDATE' as __op__ "
             + "from testRemoteStore.Profile.`$table` as p "
             + "join testavro.PAGEVIEW as pv "
-            + " on p.__key__ = pv.profileId ";
+            + " on p.__key__ = pv.profileId";
 
     List<String> sqlStmts = Arrays.asList(sql);
     staticConfigs.put(SamzaSqlApplicationConfig.CFG_SQL_STMTS_JSON, JsonUtil.toJson(sqlStmts));
diff --git a/samza-test/src/test/java/org/apache/samza/test/table/TestLocalTableEndToEnd.java b/samza-test/src/test/java/org/apache/samza/test/table/TestLocalTableEndToEnd.java
index a55b8c1..78fc7b5 100644
--- a/samza-test/src/test/java/org/apache/samza/test/table/TestLocalTableEndToEnd.java
+++ b/samza-test/src/test/java/org/apache/samza/test/table/TestLocalTableEndToEnd.java
@@ -126,9 +126,9 @@
       GenericInputDescriptor<PageView> pageViewISD = ksd.getInputDescriptor("PageView", new NoOpSerde<>());
       appDesc.getInputStream(pageViewISD)
           .map(pv -> {
-              received.add(pv);
-              return pv;
-            })
+            received.add(pv);
+            return pv;
+          })
           .partitionBy(PageView::getMemberId, v -> v, KVSerde.of(new NoOpSerde<>(), new NoOpSerde<>()), "p1")
           .join(table, new PageViewToProfileJoinFunction())
           .sink((m, collector, coordinator) -> joined.add(m));
@@ -188,15 +188,15 @@
 
       profileStream1
           .map(m -> {
-              sentToProfileTable1.add(m);
-              return new KV(m.getMemberId(), m);
-            })
+            sentToProfileTable1.add(m);
+            return new KV(m.getMemberId(), m);
+          })
           .sendTo(profileTable);
       profileStream2
           .map(m -> {
-              sentToProfileTable2.add(m);
-              return new KV(m.getMemberId(), m);
-            })
+            sentToProfileTable2.add(m);
+            return new KV(m.getMemberId(), m);
+          })
           .sendTo(profileTable);
 
       GenericInputDescriptor<PageView> pageViewISD1 = ksd.getInputDescriptor("PageView1", new NoOpSerde<PageView>());
diff --git a/samza-test/src/test/java/org/apache/samza/test/table/TestLocalTableWithSideInputsEndToEnd.java b/samza-test/src/test/java/org/apache/samza/test/table/TestLocalTableWithSideInputsEndToEnd.java
index eecc6b4..071f65e 100644
--- a/samza-test/src/test/java/org/apache/samza/test/table/TestLocalTableWithSideInputsEndToEnd.java
+++ b/samza-test/src/test/java/org/apache/samza/test/table/TestLocalTableWithSideInputsEndToEnd.java
@@ -143,10 +143,10 @@
       return new InMemoryTableDescriptor(PROFILE_TABLE, KVSerde.of(new IntegerSerde(), new ProfileJsonSerde()))
           .withSideInputs(ImmutableList.of(PROFILE_STREAM))
           .withSideInputsProcessor((msg, store) -> {
-              Profile profile = (Profile) msg.getMessage();
-              int key = profile.getMemberId();
-              return ImmutableList.of(new Entry<>(key, profile));
-            });
+            Profile profile = (Profile) msg.getMessage();
+            int key = profile.getMemberId();
+            return ImmutableList.of(new Entry<>(key, profile));
+          });
     }
   }
 
@@ -156,10 +156,10 @@
       return new RocksDbTableDescriptor(PROFILE_TABLE, KVSerde.of(new IntegerSerde(), new ProfileJsonSerde()))
           .withSideInputs(ImmutableList.of(PROFILE_STREAM))
           .withSideInputsProcessor((msg, store) -> {
-              TestTableData.Profile profile = (TestTableData.Profile) msg.getMessage();
-              int key = profile.getMemberId();
-              return ImmutableList.of(new Entry<>(key, profile));
-            });
+            TestTableData.Profile profile = (TestTableData.Profile) msg.getMessage();
+            int key = profile.getMemberId();
+            return ImmutableList.of(new Entry<>(key, profile));
+          });
     }
   }
 }
\ No newline at end of file
diff --git a/samza-test/src/test/java/org/apache/samza/test/util/ArraySystemConsumer.java b/samza-test/src/test/java/org/apache/samza/test/util/ArraySystemConsumer.java
index 6ba28ae..268cd23 100644
--- a/samza-test/src/test/java/org/apache/samza/test/util/ArraySystemConsumer.java
+++ b/samza-test/src/test/java/org/apache/samza/test/util/ArraySystemConsumer.java
@@ -61,11 +61,11 @@
       Map<SystemStreamPartition, List<IncomingMessageEnvelope>> envelopeMap = new HashMap<>();
       final AtomicInteger offset = new AtomicInteger(0);
       set.forEach(ssp -> {
-          List<IncomingMessageEnvelope> envelopes = Arrays.stream(getArrayObjects(ssp.getSystemStream().getStream(), config))
-              .map(object -> new IncomingMessageEnvelope(ssp, String.valueOf(offset.incrementAndGet()), null, object)).collect(Collectors.toList());
-          envelopes.add(IncomingMessageEnvelope.buildEndOfStreamEnvelope(ssp));
-          envelopeMap.put(ssp, envelopes);
-        });
+        List<IncomingMessageEnvelope> envelopes = Arrays.stream(getArrayObjects(ssp.getSystemStream().getStream(), config))
+            .map(object -> new IncomingMessageEnvelope(ssp, String.valueOf(offset.incrementAndGet()), null, object)).collect(Collectors.toList());
+        envelopes.add(IncomingMessageEnvelope.buildEndOfStreamEnvelope(ssp));
+        envelopeMap.put(ssp, envelopes);
+      });
       done = true;
       return envelopeMap;
     } else {
diff --git a/samza-test/src/test/java/org/apache/samza/test/util/SimpleSystemAdmin.java b/samza-test/src/test/java/org/apache/samza/test/util/SimpleSystemAdmin.java
index c735c74..493bce8 100644
--- a/samza-test/src/test/java/org/apache/samza/test/util/SimpleSystemAdmin.java
+++ b/samza-test/src/test/java/org/apache/samza/test/util/SimpleSystemAdmin.java
@@ -53,19 +53,19 @@
   public Map<String, SystemStreamMetadata> getSystemStreamMetadata(Set<String> streamNames) {
     return streamNames.stream()
         .collect(Collectors.toMap(Function.identity(), streamName -> {
-            int messageCount = isBootstrapStream(streamName) ? getMessageCount(streamName) : -1;
-            String oldestOffset = messageCount < 0 ? null : "0";
-            String newestOffset = messageCount < 0 ? null : String.valueOf(messageCount - 1);
-            String upcomingOffset = messageCount < 0 ? null : String.valueOf(messageCount);
-            Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> metadataMap = new HashMap<>();
-            int partitionCount = config.getInt("streams." + streamName + ".partitionCount", 1);
-            for (int i = 0; i < partitionCount; i++) {
-              metadataMap.put(new Partition(i), new SystemStreamMetadata.SystemStreamPartitionMetadata(
-                  oldestOffset, newestOffset, upcomingOffset
-              ));
-            }
-            return new SystemStreamMetadata(streamName, metadataMap);
-          }));
+          int messageCount = isBootstrapStream(streamName) ? getMessageCount(streamName) : -1;
+          String oldestOffset = messageCount < 0 ? null : "0";
+          String newestOffset = messageCount < 0 ? null : String.valueOf(messageCount - 1);
+          String upcomingOffset = messageCount < 0 ? null : String.valueOf(messageCount);
+          Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> metadataMap = new HashMap<>();
+          int partitionCount = config.getInt("streams." + streamName + ".partitionCount", 1);
+          for (int i = 0; i < partitionCount; i++) {
+            metadataMap.put(new Partition(i), new SystemStreamMetadata.SystemStreamPartitionMetadata(
+                oldestOffset, newestOffset, upcomingOffset
+            ));
+          }
+          return new SystemStreamMetadata(streamName, metadataMap);
+        }));
   }
 
   @Override
diff --git a/samza-test/src/test/scala/org/apache/samza/storage/kv/TestKeyValueStores.scala b/samza-test/src/test/scala/org/apache/samza/storage/kv/TestKeyValueStores.scala
index 37fde3a..c472a63 100644
--- a/samza-test/src/test/scala/org/apache/samza/storage/kv/TestKeyValueStores.scala
+++ b/samza-test/src/test/scala/org/apache/samza/storage/kv/TestKeyValueStores.scala
@@ -57,7 +57,7 @@
   def setup() {
     val kvStore: KeyValueStore[Array[Byte], Array[Byte]] = typeOfStore match {
       case "inmemory" =>
-        new InMemoryKeyValueStore
+        new InMemoryKeyValueStore(new KeyValueStoreMetrics)
       case "rocksdb" =>
         new RocksDbKeyValueStore(
           dir,
diff --git a/samza-tools/src/main/java/org/apache/samza/tools/ConsoleLoggingSystemFactory.java b/samza-tools/src/main/java/org/apache/samza/tools/ConsoleLoggingSystemFactory.java
index 4f18cd8..d544e12 100644
--- a/samza-tools/src/main/java/org/apache/samza/tools/ConsoleLoggingSystemFactory.java
+++ b/samza-tools/src/main/java/org/apache/samza/tools/ConsoleLoggingSystemFactory.java
@@ -26,7 +26,7 @@
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Function;
 import java.util.stream.Collectors;
-import org.apache.commons.lang.NotImplementedException;
+import org.apache.commons.lang3.NotImplementedException;
 import org.apache.samza.Partition;
 import org.apache.samza.config.Config;
 import org.apache.samza.metrics.MetricsRegistry;
@@ -55,7 +55,7 @@
 
   @Override
   public SystemConsumer getConsumer(String systemName, Config config, MetricsRegistry registry) {
-    throw new NotImplementedException();
+    throw new NotImplementedException("Not Implemented");
   }
 
   @Override
diff --git a/samza-tools/src/main/java/org/apache/samza/tools/GenerateKafkaEvents.java b/samza-tools/src/main/java/org/apache/samza/tools/GenerateKafkaEvents.java
index 2beef06..66c919d 100644
--- a/samza-tools/src/main/java/org/apache/samza/tools/GenerateKafkaEvents.java
+++ b/samza-tools/src/main/java/org/apache/samza/tools/GenerateKafkaEvents.java
@@ -135,13 +135,13 @@
         final int finalIndex = 0;
         Pair<String, byte[]> record = eventGenerator.apply(index);
         producer.send(new ProducerRecord<>(topicName, record.getLeft().getBytes("UTF-8"), record.getRight()),
-            (metadata, exception) -> {
-              if (exception == null) {
-                LOG.info("send completed for event {} at offset {}", finalIndex, metadata.offset());
-              } else {
-                throw new RuntimeException("Failed to send message.", exception);
-              }
-            });
+          (metadata, exception) -> {
+            if (exception == null) {
+              LOG.info("send completed for event {} at offset {}", finalIndex, metadata.offset());
+            } else {
+              throw new RuntimeException("Failed to send message.", exception);
+            }
+          });
         System.out.println(String.format("Published event %d to topic %s", index, topicName));
         if (doSleep) {
           Thread.sleep(1000);
diff --git a/samza-tools/src/main/java/org/apache/samza/tools/RandomValueGenerator.java b/samza-tools/src/main/java/org/apache/samza/tools/RandomValueGenerator.java
index 18e0316..8abf984 100644
--- a/samza-tools/src/main/java/org/apache/samza/tools/RandomValueGenerator.java
+++ b/samza-tools/src/main/java/org/apache/samza/tools/RandomValueGenerator.java
@@ -46,7 +46,7 @@
     }
     // assert(max > min);
 
-    return (rand.nextInt(max - min + 1) + min);
+    return rand.nextInt(max - min + 1) + min;
   }
 
   public String getNextString(int min, int max) {
diff --git a/samza-tools/src/main/java/org/apache/samza/tools/SamzaSqlConsole.java b/samza-tools/src/main/java/org/apache/samza/tools/SamzaSqlConsole.java
index b2cecfb..9d84b63 100644
--- a/samza-tools/src/main/java/org/apache/samza/tools/SamzaSqlConsole.java
+++ b/samza-tools/src/main/java/org/apache/samza/tools/SamzaSqlConsole.java
@@ -19,7 +19,6 @@
 
 package org.apache.samza.tools;
 
-import com.google.common.base.Joiner;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -36,8 +35,6 @@
 import org.apache.samza.container.grouper.task.SingleContainerGrouperFactory;
 import org.apache.samza.serializers.StringSerdeFactory;
 import org.apache.samza.sql.avro.ConfigBasedAvroRelSchemaProviderFactory;
-import org.apache.samza.sql.fn.FlattenUdf;
-import org.apache.samza.sql.fn.RegexMatchUdf;
 import org.apache.samza.sql.impl.ConfigBasedIOResolverFactory;
 import org.apache.samza.sql.interfaces.SqlIOConfig;
 import org.apache.samza.sql.runner.SamzaSqlApplicationConfig;
diff --git a/samza-tools/src/main/java/org/apache/samza/tools/avro/AvroSchemaGenRelConverterFactory.java b/samza-tools/src/main/java/org/apache/samza/tools/avro/AvroSchemaGenRelConverterFactory.java
index cf8c568..8664407 100644
--- a/samza-tools/src/main/java/org/apache/samza/tools/avro/AvroSchemaGenRelConverterFactory.java
+++ b/samza-tools/src/main/java/org/apache/samza/tools/avro/AvroSchemaGenRelConverterFactory.java
@@ -38,6 +38,6 @@
   @Override
   public SamzaRelConverter create(SystemStream systemStream, RelSchemaProvider relSchemaProvider, Config config) {
     return relConverters.computeIfAbsent(systemStream,
-        ss -> new AvroSchemaGenRelConverter(ss, (AvroRelSchemaProvider) relSchemaProvider, config));
+      ss -> new AvroSchemaGenRelConverter(ss, (AvroRelSchemaProvider) relSchemaProvider, config));
   }
 }
diff --git a/samza-tools/src/main/java/org/apache/samza/tools/avro/AvroSerDeFactory.java b/samza-tools/src/main/java/org/apache/samza/tools/avro/AvroSerDeFactory.java
index a052306..3ad2c9b 100644
--- a/samza-tools/src/main/java/org/apache/samza/tools/avro/AvroSerDeFactory.java
+++ b/samza-tools/src/main/java/org/apache/samza/tools/avro/AvroSerDeFactory.java
@@ -31,7 +31,6 @@
 import org.apache.avro.io.DecoderFactory;
 import org.apache.avro.io.Encoder;
 import org.apache.avro.io.EncoderFactory;
-import org.apache.commons.lang.NotImplementedException;
 import org.apache.samza.SamzaException;
 import org.apache.samza.config.Config;
 import org.apache.samza.serializers.Serde;
@@ -43,7 +42,7 @@
  */
 public class AvroSerDeFactory implements SerdeFactory {
 
-  public static String CFG_AVRO_SCHEMA = "serializers.avro.schema";
+  public static final String CFG_AVRO_SCHEMA = "serializers.avro.schema";
 
   @Override
   public Serde getSerde(String name, Config config) {
diff --git a/samza-tools/src/main/java/org/apache/samza/tools/benchmark/AbstractSamzaBench.java b/samza-tools/src/main/java/org/apache/samza/tools/benchmark/AbstractSamzaBench.java
index ccf6bb2..e8a3254 100644
--- a/samza-tools/src/main/java/org/apache/samza/tools/benchmark/AbstractSamzaBench.java
+++ b/samza-tools/src/main/java/org/apache/samza/tools/benchmark/AbstractSamzaBench.java
@@ -81,7 +81,7 @@
   protected int totalEvents;
   protected String streamId;
 
-  public AbstractSamzaBench(String scriptName, String args[]) throws ParseException {
+  public AbstractSamzaBench(String scriptName, String[] args) throws ParseException {
     options = new Options();
     options.addOption(
         CommandLineHelper.createOption(OPT_SHORT_PROPERTIES_FILE, OPT_LONG_PROPERTIES_FILE, OPT_ARG_PROPERTIES_FILE,
@@ -143,8 +143,8 @@
   }
 
   Config convertToSamzaConfig(Properties props) {
-      Map<String, String> propsValue =
-          props.stringPropertyNames().stream().collect(Collectors.toMap(Function.identity(), props::getProperty));
-      return new MapConfig(propsValue);
-    }
+    Map<String, String> propsValue =
+        props.stringPropertyNames().stream().collect(Collectors.toMap(Function.identity(), props::getProperty));
+    return new MapConfig(propsValue);
+  }
 }
\ No newline at end of file
diff --git a/samza-tools/src/main/java/org/apache/samza/tools/benchmark/ConfigBasedSspGrouperFactory.java b/samza-tools/src/main/java/org/apache/samza/tools/benchmark/ConfigBasedSspGrouperFactory.java
index 073fbb0..d1683f0 100644
--- a/samza-tools/src/main/java/org/apache/samza/tools/benchmark/ConfigBasedSspGrouperFactory.java
+++ b/samza-tools/src/main/java/org/apache/samza/tools/benchmark/ConfigBasedSspGrouperFactory.java
@@ -52,7 +52,7 @@
 
   private class ConfigBasedSspGrouper implements SystemStreamPartitionGrouper {
     private final Config config;
-    private HashMap<String, Set<Integer>> _streamPartitionsMap = new HashMap<>();
+    private HashMap<String, Set<Integer>> streamPartitionsMap = new HashMap<>();
 
     public ConfigBasedSspGrouper(Config config) {
       this.config = config;
@@ -75,13 +75,13 @@
     private Set<Integer> getPartitions(SystemStream systemStream) {
       String streamName = systemStream.getStream();
 
-      if (!_streamPartitionsMap.containsKey(streamName)) {
+      if (!streamPartitionsMap.containsKey(streamName)) {
         String partitions = config.get(String.format(CONFIG_STREAM_PARTITIONS, streamName));
-        _streamPartitionsMap.put(streamName, Arrays.stream(partitions.split(CFG_PARTITIONS_DELIMITER))
+        streamPartitionsMap.put(streamName, Arrays.stream(partitions.split(CFG_PARTITIONS_DELIMITER))
             .map(Integer::parseInt)
             .collect(Collectors.toSet()));
       }
-      return _streamPartitionsMap.get(streamName);
+      return streamPartitionsMap.get(streamName);
     }
   }
 }
diff --git a/samza-tools/src/main/java/org/apache/samza/tools/benchmark/SystemConsumerBench.java b/samza-tools/src/main/java/org/apache/samza/tools/benchmark/SystemConsumerBench.java
index cbfc865..0054b5e 100644
--- a/samza-tools/src/main/java/org/apache/samza/tools/benchmark/SystemConsumerBench.java
+++ b/samza-tools/src/main/java/org/apache/samza/tools/benchmark/SystemConsumerBench.java
@@ -43,12 +43,12 @@
  */
 public class SystemConsumerBench extends AbstractSamzaBench {
 
-  public static void main(String args[]) throws Exception {
+  public static void main(String[] args) throws Exception {
     SystemConsumerBench bench = new SystemConsumerBench(args);
     bench.start();
   }
 
-  public SystemConsumerBench(String args[]) throws ParseException {
+  public SystemConsumerBench(String[] args) throws ParseException {
     super("system-consumer-bench", args);
   }
 
@@ -77,7 +77,7 @@
 
     System.out.println("Ending consumption at " + Instant.now());
     System.out.println(String.format("Event Rate is %s Messages/Sec ",
-        (numEvents * 1000 / Duration.between(startTime, Instant.now()).toMillis())));
+        numEvents * 1000 / Duration.between(startTime, Instant.now()).toMillis()));
     consumer.stop();
     System.exit(0);
   }
diff --git a/samza-tools/src/main/java/org/apache/samza/tools/benchmark/SystemConsumerWithSamzaBench.java b/samza-tools/src/main/java/org/apache/samza/tools/benchmark/SystemConsumerWithSamzaBench.java
index 774eb9a..bdb8fce 100644
--- a/samza-tools/src/main/java/org/apache/samza/tools/benchmark/SystemConsumerWithSamzaBench.java
+++ b/samza-tools/src/main/java/org/apache/samza/tools/benchmark/SystemConsumerWithSamzaBench.java
@@ -56,7 +56,7 @@
     super("system-consumer-with-samza-bench", args);
   }
 
-  public static void main(String args[]) throws Exception {
+  public static void main(String[] args) throws Exception {
     SystemConsumerBench bench = new SystemConsumerBench(args);
     bench.start();
   }
@@ -98,7 +98,7 @@
     System.out.println("\n*******************");
     System.out.println(String.format("Started at %s Ending at %s ", consumeFn.startTime, endTime));
     System.out.println(String.format("Event Rate is %s Messages/Sec ",
-        (consumeFn.getEventsConsumed() * 1000 / Duration.between(consumeFn.startTime, Instant.now()).toMillis())));
+        consumeFn.getEventsConsumed() * 1000 / Duration.between(consumeFn.startTime, Instant.now()).toMillis()));
 
     System.out.println(
         "Event Rate is " + consumeFn.getEventsConsumed() * 1000 / Duration.between(consumeFn.startTime, endTime).toMillis());
diff --git a/samza-tools/src/main/java/org/apache/samza/tools/benchmark/SystemProducerBench.java b/samza-tools/src/main/java/org/apache/samza/tools/benchmark/SystemProducerBench.java
index 6c2a5f2..af54a36 100644
--- a/samza-tools/src/main/java/org/apache/samza/tools/benchmark/SystemProducerBench.java
+++ b/samza-tools/src/main/java/org/apache/samza/tools/benchmark/SystemProducerBench.java
@@ -23,16 +23,11 @@
 import java.time.Duration;
 import java.time.Instant;
 import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.function.Function;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 import org.apache.samza.Partition;
-import org.apache.samza.config.Config;
-import org.apache.samza.config.MapConfig;
 import org.apache.samza.system.OutgoingMessageEnvelope;
 import org.apache.samza.system.SystemProducer;
 import org.apache.samza.system.SystemStreamPartition;
@@ -53,12 +48,12 @@
 
   private byte[] value;
 
-  public static void main(String args[]) throws Exception {
+  public static void main(String[] args) throws Exception {
     SystemProducerBench bench = new SystemProducerBench(args);
     bench.start();
   }
 
-  public SystemProducerBench(String args[]) throws ParseException {
+  public SystemProducerBench(String[] args) throws ParseException {
     super("system-producer", args);
   }
 
@@ -93,13 +88,13 @@
 
     System.out.println("Ending production at " + Instant.now());
     System.out.println(String.format("Event Rate is %s Messages/Sec",
-        (totalEvents * 1000 / Duration.between(startTime, Instant.now()).toMillis())));
+        totalEvents * 1000 / Duration.between(startTime, Instant.now()).toMillis()));
 
     producer.flush(source);
 
     System.out.println("Ending flush at " + Instant.now());
     System.out.println(String.format("Event Rate with flush is %s Messages/Sec",
-        (totalEvents * 1000 / Duration.between(startTime, Instant.now()).toMillis())));
+        totalEvents * 1000 / Duration.between(startTime, Instant.now()).toMillis()));
     producer.stop();
     System.exit(0);
   }
diff --git a/samza-tools/src/main/java/org/apache/samza/tools/json/JsonRelConverterFactory.java b/samza-tools/src/main/java/org/apache/samza/tools/json/JsonRelConverterFactory.java
index 4db066a..f2c3769 100644
--- a/samza-tools/src/main/java/org/apache/samza/tools/json/JsonRelConverterFactory.java
+++ b/samza-tools/src/main/java/org/apache/samza/tools/json/JsonRelConverterFactory.java
@@ -21,7 +21,7 @@
 
 import java.io.IOException;
 import java.util.List;
-import org.apache.commons.lang.NotImplementedException;
+import org.apache.commons.lang3.NotImplementedException;
 import org.apache.samza.SamzaException;
 import org.apache.samza.config.Config;
 import org.apache.samza.operators.KV;
@@ -51,7 +51,7 @@
 
     @Override
     public SamzaSqlRelMessage convertToRelMessage(KV<Object, Object> kv) {
-      throw new NotImplementedException();
+      throw new NotImplementedException("Not implemented.");
     }
 
     @Override
diff --git a/samza-yarn/src/main/java/org/apache/samza/job/yarn/FileSystemImplConfig.java b/samza-yarn/src/main/java/org/apache/samza/job/yarn/FileSystemImplConfig.java
index 7e10f4f..a582dbb 100644
--- a/samza-yarn/src/main/java/org/apache/samza/job/yarn/FileSystemImplConfig.java
+++ b/samza-yarn/src/main/java/org/apache/samza/job/yarn/FileSystemImplConfig.java
@@ -20,11 +20,7 @@
 
 import java.util.ArrayList;
 import java.util.List;
-import java.util.Set;
-import org.apache.commons.lang.StringUtils;
 import org.apache.samza.config.Config;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 
 /**
diff --git a/samza-yarn/src/main/java/org/apache/samza/job/yarn/YarnAppState.java b/samza-yarn/src/main/java/org/apache/samza/job/yarn/YarnAppState.java
index 4d15b1a..15ca92f 100644
--- a/samza-yarn/src/main/java/org/apache/samza/job/yarn/YarnAppState.java
+++ b/samza-yarn/src/main/java/org/apache/samza/job/yarn/YarnAppState.java
@@ -22,7 +22,6 @@
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
-import org.apache.samza.job.yarn.YarnContainer;
 
 import java.net.URL;
 import java.util.Map;
diff --git a/samza-yarn/src/main/java/org/apache/samza/job/yarn/YarnClusterResourceManager.java b/samza-yarn/src/main/java/org/apache/samza/job/yarn/YarnClusterResourceManager.java
index e05b31e..46507d9 100644
--- a/samza-yarn/src/main/java/org/apache/samza/job/yarn/YarnClusterResourceManager.java
+++ b/samza-yarn/src/main/java/org/apache/samza/job/yarn/YarnClusterResourceManager.java
@@ -81,7 +81,7 @@
   private static final int PREFERRED_HOST_PRIORITY = 0;
   private static final int ANY_HOST_PRIORITY = 1;
 
-  private final String INVALID_PROCESSOR_ID = "-1";
+  private static final String INVALID_PROCESSOR_ID = "-1";
 
   /**
    * The AMClient instance to request resources from yarn.
@@ -153,11 +153,11 @@
     // Use the Samza job config "fs.<scheme>.impl" and "fs.<scheme>.impl.*" for YarnConfiguration
     FileSystemImplConfig fsImplConfig = new FileSystemImplConfig(config);
     fsImplConfig.getSchemes().forEach(
-        scheme -> {
-          fsImplConfig.getSchemeConfig(scheme).forEach(
-              (confKey, confValue) -> yarnConfiguration.set(confKey, confValue)
-          );
-        }
+      scheme -> {
+        fsImplConfig.getSchemeConfig(scheme).forEach(
+          (confKey, confValue) -> yarnConfiguration.set(confKey, confValue)
+        );
+      }
     );
 
     MetricsRegistryMap registry = new MetricsRegistryMap();
@@ -204,7 +204,7 @@
    */
   @Override
   public void start() {
-    if(!started.compareAndSet(false, true)) {
+    if (!started.compareAndSet(false, true)) {
       log.info("Attempting to start an already started YarnClusterResourceManager");
       return;
     }
@@ -217,7 +217,7 @@
     nmClientAsync.start();
     lifecycle.onInit();
 
-    if(lifecycle.shouldShutdown()) {
+    if (lifecycle.shouldShutdown()) {
       clusterManagerCallback.onError(new SamzaException("Invalid resource request."));
     }
 
@@ -338,11 +338,11 @@
   //In that case, this scan will turn into a lookup. This change will require changes/testing in the UI files because
   //those UI stub templates operate on the YarnContainer object.
   private String getRunningProcessorId(String containerId) {
-    for(Map.Entry<String, YarnContainer> entry : state.runningProcessors.entrySet()) {
+    for (Map.Entry<String, YarnContainer> entry : state.runningProcessors.entrySet()) {
       String key = entry.getKey();
       YarnContainer yarnContainer = entry.getValue();
       String yarnContainerId = yarnContainer.id().toString();
-      if(yarnContainerId.equals(containerId)) {
+      if (yarnContainerId.equals(containerId)) {
         return key;
       }
     }
@@ -395,7 +395,7 @@
     service.onShutdown();
     metrics.stop();
 
-    if(status != SamzaApplicationState.SamzaAppStatus.UNDEFINED) {
+    if (status != SamzaApplicationState.SamzaAppStatus.UNDEFINED) {
       cleanupStagingDir();
     }
   }
@@ -406,7 +406,7 @@
    */
   private void cleanupStagingDir() {
     String yarnJobStagingDirectory = yarnConfig.getYarnJobStagingDirectory();
-    if(yarnJobStagingDirectory != null) {
+    if (yarnJobStagingDirectory != null) {
       JobContext context = new JobContext();
       context.setAppStagingDir(new Path(yarnJobStagingDirectory));
 
@@ -417,7 +417,7 @@
         log.error("Unable to clean up file system.", e);
         return;
       }
-      if(fs != null) {
+      if (fs != null) {
         YarnJobUtil.cleanupStagingDir(context, fs);
       }
     }
@@ -433,7 +433,7 @@
   public void onContainersCompleted(List<ContainerStatus> statuses) {
     List<SamzaResourceStatus> samzaResourceStatuses = new ArrayList<>();
 
-    for(ContainerStatus status: statuses) {
+    for (ContainerStatus status : statuses) {
       log.info("Got completion notification for Container ID: {} with status: {} and state: {}. Diagnostics information: {}.",
           status.getContainerId(), status.getExitStatus(), status.getState(), status.getDiagnostics());
 
@@ -445,12 +445,12 @@
 
       //remove the container from the list of running containers, if failed with a non-zero exit code, add it to the list of
       //failed containers.
-      if(!completedProcessorID.equals(INVALID_PROCESSOR_ID)){
-        if(state.runningProcessors.containsKey(completedProcessorID)) {
+      if (!completedProcessorID.equals(INVALID_PROCESSOR_ID)) {
+        if (state.runningProcessors.containsKey(completedProcessorID)) {
           log.info("Removing Processor ID: {} from YarnClusterResourceManager running processors.", completedProcessorID);
           state.runningProcessors.remove(completedProcessorID);
 
-          if(status.getExitStatus() != ContainerExitStatus.SUCCESS)
+          if (status.getExitStatus() != ContainerExitStatus.SUCCESS)
             state.failedContainersStatus.put(status.getContainerId().toString(), status);
         }
       }
@@ -465,19 +465,20 @@
    */
   @Override
   public void onContainersAllocated(List<Container> containers) {
-      List<SamzaResource> resources = new ArrayList<SamzaResource>();
-      for(Container container : containers) {
-          log.info("Got allocation notification for Container ID: {} on host: {}", container.getId(), container.getNodeId().getHost());
-          String containerId = container.getId().toString();
-          String host = container.getNodeId().getHost();
-          int memory = container.getResource().getMemory();
-          int numCores = container.getResource().getVirtualCores();
+    List<SamzaResource> resources = new ArrayList<SamzaResource>();
+    for (Container container : containers) {
+      log.info("Got allocation notification for Container ID: {} on host: {}", container.getId(),
+          container.getNodeId().getHost());
+      String containerId = container.getId().toString();
+      String host = container.getNodeId().getHost();
+      int memory = container.getResource().getMemory();
+      int numCores = container.getResource().getVirtualCores();
 
-          SamzaResource resource = new SamzaResource(numCores, memory, host, containerId);
-          allocatedResources.put(resource, container);
-          resources.add(resource);
-      }
-      clusterManagerCallback.onResourcesAvailable(resources);
+      SamzaResource resource = new SamzaResource(numCores, memory, host, containerId);
+      allocatedResources.put(resource, container);
+      resources.add(resource);
+    }
+    clusterManagerCallback.onResourcesAvailable(resources);
   }
 
   //The below methods are specific to the Yarn AMRM Client. We currently don't handle scenarios where there are
@@ -513,7 +514,7 @@
   public void onContainerStarted(ContainerId containerId, Map<String, ByteBuffer> allServiceResponse) {
     String processorId = getPendingProcessorId(containerId);
     if (processorId != null) {
-    log.info("Got start notification for Container ID: {} for Processor ID: {}", containerId, processorId);
+      log.info("Got start notification for Container ID: {} for Processor ID: {}", containerId, processorId);
       // 1. Move the processor from pending to running state
       final YarnContainer container = state.pendingProcessors.remove(processorId);
 
@@ -570,6 +571,9 @@
     if (processorId != null) {
       log.info("Got stop error notification for Container ID: {} for Processor ID: {}", containerId, processorId, t);
       YarnContainer container = state.runningProcessors.get(processorId);
+      SamzaResource resource = new SamzaResource(container.resource().getVirtualCores(),
+          container.resource().getMemory(), container.nodeId().getHost(), containerId.toString());
+      clusterManagerCallback.onStreamProcessorStopFailure(resource, t);
     } else {
       log.warn("Did not find the running Processor ID for the stop error notification for Container ID: {}. " +
           "Ignoring notification", containerId);
@@ -661,7 +665,11 @@
     ContainerLaunchContext context = Records.newRecord(ContainerLaunchContext.class);
     context.setEnvironment(env);
     context.setTokens(allTokens.duplicate());
-    context.setCommands(new ArrayList<String>() {{add(cmd);}});
+    context.setCommands(new ArrayList<String>() {
+      {
+        add(cmd);
+      }
+    });
     context.setLocalResources(localResourceMap);
 
     if (UserGroupInformation.isSecurityEnabled()) {
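
The double-brace ArrayList passed to setCommands (reformatted in the hunk above) can also be avoided entirely; Collections.singletonList is the more common idiom for a single-element command list. A sketch, assuming the launch context never needs to mutate the list afterwards:

    // java.util.Collections.singletonList returns an immutable one-element list.
    context.setCommands(Collections.singletonList(cmd));
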
diff --git a/samza-yarn/src/main/java/org/apache/samza/validation/YarnJobValidationTool.java b/samza-yarn/src/main/java/org/apache/samza/validation/YarnJobValidationTool.java
index bbcc976..2b31977 100644
--- a/samza-yarn/src/main/java/org/apache/samza/validation/YarnJobValidationTool.java
+++ b/samza-yarn/src/main/java/org/apache/samza/validation/YarnJobValidationTool.java
@@ -61,7 +61,7 @@
  *
  * When running this tool, please provide the configuration URI of job. For example:
  *
- * deploy/samza/bin/validate-yarn-job.sh --config-factory=org.apache.samza.config.factories.PropertiesConfigFactory --config-path=file://$PWD/deploy/samza/config/wikipedia-feed.properties [--metrics-validator=com.foo.bar.SomeMetricsValidator]
+ * deploy/samza/bin/validate-yarn-job.sh --config job.config.loader.factory=org.apache.samza.config.loaders.PropertiesConfigLoaderFactory --config job.config.loader.properties.path=$PWD/deploy/samza/config/wikipedia-feed.properties [--metrics-validator=com.foo.bar.SomeMetricsValidator]
  *
  * The tool prints out the validation result in each step and throws an exception when the
  * validation fails.
@@ -93,7 +93,7 @@
       appId = validateAppId();
       attemptId = validateRunningAttemptId(appId);
       validateContainerCount(attemptId);
-      if(validator != null) {
+      if (validator != null) {
         validateJmxMetrics();
       }
 
@@ -108,10 +108,10 @@
     // fetch only the last created application with the job name and id
     // i.e. get the application with max appId
     ApplicationId appId = null;
-    for(ApplicationReport applicationReport : this.client.getApplications()) {
-      if(applicationReport.getName().equals(this.jobName)) {
+    for (ApplicationReport applicationReport : this.client.getApplications()) {
+      if (applicationReport.getName().equals(this.jobName)) {
         ApplicationId id = applicationReport.getApplicationId();
-        if(appId == null || appId.compareTo(id) < 0) {
+        if (appId == null || appId.compareTo(id) < 0) {
           appId = id;
         }
       }
@@ -137,8 +137,8 @@
 
   public int validateContainerCount(ApplicationAttemptId attemptId) throws Exception {
     int runningContainerCount = 0;
-    for(ContainerReport containerReport : this.client.getContainers(attemptId)) {
-      if(containerReport.getContainerState() == ContainerState.RUNNING) {
+    for (ContainerReport containerReport : this.client.getContainers(attemptId)) {
+      if (containerReport.getContainerState() == ContainerState.RUNNING) {
         ++runningContainerCount;
       }
     }
@@ -157,7 +157,7 @@
     MetricsRegistry metricsRegistry = new MetricsRegistryMap();
     CoordinatorStreamStore coordinatorStreamStore = new CoordinatorStreamStore(config, metricsRegistry);
     coordinatorStreamStore.init();
-    try{
+    try {
       Config configFromCoordinatorStream = CoordinatorStreamUtil.readConfigFromCoordinatorStream(coordinatorStreamStore);
       ChangelogStreamManager changelogStreamManager = new ChangelogStreamManager(coordinatorStreamStore);
       JobModelManager jobModelManager =
@@ -181,7 +181,7 @@
     }
   }
 
-  public static void main(String [] args) throws Exception {
+  public static void main(String[] args) throws Exception {
     CommandLine cmdline = new CommandLine();
     OptionParser parser = cmdline.parser();
     OptionSpec<String> validatorOpt = parser.accepts("metrics-validator", "The metrics validator class.")
diff --git a/samza-yarn/src/main/java/org/apache/samza/webapp/ApplicationMasterRestClient.java b/samza-yarn/src/main/java/org/apache/samza/webapp/ApplicationMasterRestClient.java
index eed16db..354e71e 100644
--- a/samza-yarn/src/main/java/org/apache/samza/webapp/ApplicationMasterRestClient.java
+++ b/samza-yarn/src/main/java/org/apache/samza/webapp/ApplicationMasterRestClient.java
@@ -53,7 +53,7 @@
    */
   public Map<String, Map<String, Object>> getMetrics() throws IOException {
     String jsonString = getEntityAsJson("/metrics", "metrics");
-    return jsonMapper.readValue(jsonString, new TypeReference<Map<String, Map<String, Object>>>() {});
+    return jsonMapper.readValue(jsonString, new TypeReference<Map<String, Map<String, Object>>>() { });
   }
 
   /**
@@ -62,7 +62,7 @@
    */
   public Map<String, Object> getTaskContext() throws IOException {
     String jsonString = getEntityAsJson("/task-context", "task context");
-    return jsonMapper.readValue(jsonString, new TypeReference<Map<String, Object>>() {});
+    return jsonMapper.readValue(jsonString, new TypeReference<Map<String, Object>>() { });
   }
 
   /**
@@ -71,7 +71,7 @@
    */
   public Map<String, Object> getAmState() throws IOException {
     String jsonString = getEntityAsJson("/am", "AM state");
-    return jsonMapper.readValue(jsonString, new TypeReference<Map<String, Object>>() {});
+    return jsonMapper.readValue(jsonString, new TypeReference<Map<String, Object>>() { });
   }
 
   /**
@@ -80,7 +80,7 @@
    */
   public Map<String, Object> getConfig() throws IOException {
     String jsonString = getEntityAsJson("/config", "config");
-    return jsonMapper.readValue(jsonString, new TypeReference<Map<String, Object>>() {});
+    return jsonMapper.readValue(jsonString, new TypeReference<Map<String, Object>>() { });
   }
 
   @Override
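
The empty anonymous subclass touched in the hunks above (new TypeReference<...>() { }) is how Jackson recovers a parameterized type that erasure would otherwise discard. A self-contained sketch of the same pattern, using the plain Jackson 2 (com.fasterxml) API for illustration rather than Samza's own mapper:

    import java.util.Map;
    import com.fasterxml.jackson.core.type.TypeReference;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class TypeReferenceExample {
      public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        String json = "{\"metrics\":{\"commit-calls\":3}}";
        // The anonymous subclass carries Map<String, Map<String, Object>> past type erasure.
        Map<String, Map<String, Object>> parsed =
            mapper.readValue(json, new TypeReference<Map<String, Map<String, Object>>>() { });
        System.out.println(parsed.get("metrics").get("commit-calls"));
      }
    }
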
diff --git a/samza-yarn/src/main/scala/org/apache/samza/job/yarn/ClientHelper.scala b/samza-yarn/src/main/scala/org/apache/samza/job/yarn/ClientHelper.scala
index 196ac91..4c3c93e 100644
--- a/samza-yarn/src/main/scala/org/apache/samza/job/yarn/ClientHelper.scala
+++ b/samza-yarn/src/main/scala/org/apache/samza/job/yarn/ClientHelper.scala
@@ -301,8 +301,8 @@
   }
 
   private def isActiveApplication(applicationReport: ApplicationReport): Boolean = {
-    (Running.equals(toAppStatus(applicationReport).get)
-    || New.equals(toAppStatus(applicationReport).get))
+    val status = toAppStatus(applicationReport).get
+    Running.equals(status) || New.equals(status)
   }
 
   def toAppStatus(applicationReport: ApplicationReport): Option[ApplicationStatus] = {
diff --git a/samza-yarn/src/main/scala/org/apache/samza/job/yarn/YarnJob.scala b/samza-yarn/src/main/scala/org/apache/samza/job/yarn/YarnJob.scala
index 43e6a7c..ca681f5 100644
--- a/samza-yarn/src/main/scala/org/apache/samza/job/yarn/YarnJob.scala
+++ b/samza-yarn/src/main/scala/org/apache/samza/job/yarn/YarnJob.scala
@@ -184,12 +184,10 @@
         Util.envVarEscape(SamzaObjectMapper.getObjectMapper.writeValueAsString(coordinatorSystemConfig))
     }
     envMapBuilder += ShellCommandConfig.ENV_JAVA_OPTS -> Util.envVarEscape(yarnConfig.getAmOpts)
-    val clusterBasedJobCoordinatorDependencyIsolationEnabled =
-      jobConfig.getClusterBasedJobCoordinatorDependencyIsolationEnabled
-    envMapBuilder += ShellCommandConfig.ENV_CLUSTER_BASED_JOB_COORDINATOR_DEPENDENCY_ISOLATION_ENABLED ->
-      Util.envVarEscape(Boolean.toString(clusterBasedJobCoordinatorDependencyIsolationEnabled))
-    if (clusterBasedJobCoordinatorDependencyIsolationEnabled) {
-      // dependency isolation is enabled, so need to specify where the application lib directory is for app resources
+    val splitDeploymentEnabled = jobConfig.isSplitDeploymentEnabled
+    envMapBuilder += ShellCommandConfig.ENV_SPLIT_DEPLOYMENT_ENABLED -> Util.envVarEscape(Boolean.toString(splitDeploymentEnabled))
+    if (splitDeploymentEnabled) {
+      //split deployment is enabled, so need to specify where the application lib directory is for app resources
       envMapBuilder += ShellCommandConfig.ENV_APPLICATION_LIB_DIR ->
         Util.envVarEscape(String.format("./%s/lib", DependencyIsolationUtils.APPLICATION_DIRECTORY))
     }
@@ -206,7 +204,7 @@
   @VisibleForTesting
   private[yarn] def buildJobCoordinatorCmd(config: Config, jobConfig: JobConfig): String = {
     var cmdExec = "./__package/bin/run-jc.sh" // default location
-    if (jobConfig.getClusterBasedJobCoordinatorDependencyIsolationEnabled) {
+    if (jobConfig.isSplitDeploymentEnabled) {
       cmdExec = "./%s/bin/run-jc.sh" format DependencyIsolationUtils.FRAMEWORK_INFRASTRUCTURE_DIRECTORY
       logger.info("Using isolated cluster-based job coordinator path: %s" format cmdExec)
     }
diff --git a/samza-yarn/src/test/java/org/apache/samza/config/TestYarnConfig.java b/samza-yarn/src/test/java/org/apache/samza/config/TestYarnConfig.java
index 1018a75..ae4bf25 100644
--- a/samza-yarn/src/test/java/org/apache/samza/config/TestYarnConfig.java
+++ b/samza-yarn/src/test/java/org/apache/samza/config/TestYarnConfig.java
@@ -19,12 +19,10 @@
 package org.apache.samza.config;
 
 import java.util.Collections;
-import java.util.Optional;
 import org.apache.samza.SamzaException;
 import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 
 
 public class TestYarnConfig {
diff --git a/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestFileSystemImplConfig.java b/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestFileSystemImplConfig.java
index 63d83df..b906efd 100644
--- a/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestFileSystemImplConfig.java
+++ b/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestFileSystemImplConfig.java
@@ -67,13 +67,13 @@
 
     FileSystemImplConfig manager = new FileSystemImplConfig(conf);
 
-    Map<String, String> expectedFsHttpImplConfs = ImmutableMap.of( //Scheme with additional subkeys
+    Map<String, String> expectedFsHttpImplConfs = ImmutableMap.of(//Scheme with additional subkeys
         "fs.http.impl", "org.apache.samza.HttpFileSystem",
         "fs.http.impl.key1", "val1",
         "fs.http.impl.key2", "val2"
     );
 
-    Map<String, String> expectedFsMyschemeImplConfs = ImmutableMap.of( // Scheme without subkeys
+    Map<String, String> expectedFsMyschemeImplConfs = ImmutableMap.of(// Scheme without subkeys
         "fs.myscheme.impl", "org.apache.samza.MySchemeFileSystem"
     );
 
diff --git a/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestLocalizerResourceConfig.java b/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestLocalizerResourceConfig.java
index e003125..6d0b8af 100644
--- a/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestLocalizerResourceConfig.java
+++ b/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestLocalizerResourceConfig.java
@@ -32,7 +32,7 @@
 public class TestLocalizerResourceConfig {
 
   @Rule
-  public ExpectedException thrown= ExpectedException.none();
+  public ExpectedException thrown = ExpectedException.none();
 
   @Test
   public void testResourceConfigIncluded() {
diff --git a/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestLocalizerResourceMapper.java b/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestLocalizerResourceMapper.java
index d065019..6ff8df1 100644
--- a/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestLocalizerResourceMapper.java
+++ b/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestLocalizerResourceMapper.java
@@ -37,7 +37,7 @@
 public class TestLocalizerResourceMapper {
 
   @Rule
-  public ExpectedException thrown= ExpectedException.none();
+  public ExpectedException thrown = ExpectedException.none();
 
   @Test
   public void testResourceMapSuccess() {
diff --git a/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestYarnClusterResourceManager.java b/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestYarnClusterResourceManager.java
index 8f19eab..0898800 100644
--- a/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestYarnClusterResourceManager.java
+++ b/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestYarnClusterResourceManager.java
@@ -65,7 +65,7 @@
         new YarnContainer(Container.newInstance(
             ContainerId.newContainerId(
                 ApplicationAttemptId.newInstance(
-                    ApplicationId.newInstance(10000l, 1), 1), 1),
+                    ApplicationId.newInstance(10000L, 1), 1), 1),
             NodeId.newInstance("host1", 8088), "http://host1",
             Resource.newInstance(1024, 1), Priority.newInstance(1),
             Token.newInstance("id".getBytes(), "read", "password".getBytes(), "service"))));
@@ -75,7 +75,7 @@
 
     yarnClusterResourceManager.onStartContainerError(ContainerId.newContainerId(
         ApplicationAttemptId.newInstance(
-            ApplicationId.newInstance(10000l, 1), 1), 1),
+            ApplicationId.newInstance(10000L, 1), 1), 1),
         new Exception());
 
     assertEquals(0, yarnAppState.pendingProcessors.size());
diff --git a/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestYarnJob.java b/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestYarnJob.java
index 7961360..4858d76 100644
--- a/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestYarnJob.java
+++ b/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestYarnJob.java
@@ -43,12 +43,11 @@
     Config config = new MapConfig();
     assertEquals("./__package/bin/run-jc.sh", YarnJob$.MODULE$.buildJobCoordinatorCmd(config, new JobConfig(config)));
 
-    // cluster-based job coordinator dependency isolation is enabled; use script from framework infrastructure directory
-    Config configJobCoordinatorDependencyIsolationEnabled =
-        new MapConfig(ImmutableMap.of(JobConfig.CLUSTER_BASED_JOB_COORDINATOR_DEPENDENCY_ISOLATION_ENABLED, "true"));
+    // split deployment is enabled; use script from framework infrastructure directory
+    Config splitDeploymentEnabled =
+        new MapConfig(ImmutableMap.of(JobConfig.JOB_SPLIT_DEPLOYMENT_ENABLED, "true"));
     assertEquals(String.format("./%s/bin/run-jc.sh", DependencyIsolationUtils.FRAMEWORK_INFRASTRUCTURE_DIRECTORY),
-        YarnJob$.MODULE$.buildJobCoordinatorCmd(configJobCoordinatorDependencyIsolationEnabled,
-            new JobConfig(configJobCoordinatorDependencyIsolationEnabled)));
+        YarnJob$.MODULE$.buildJobCoordinatorCmd(splitDeploymentEnabled, new JobConfig(splitDeploymentEnabled)));
   }
 
   @Test
@@ -59,14 +58,14 @@
         .put(JobConfig.JOB_ID, "jobId")
         .put(JobConfig.JOB_COORDINATOR_SYSTEM, "jobCoordinatorSystem")
         .put(YarnConfig.AM_JVM_OPTIONS, amJvmOptions) // needs escaping
-        .put(JobConfig.CLUSTER_BASED_JOB_COORDINATOR_DEPENDENCY_ISOLATION_ENABLED, "false")
+        .put(JobConfig.JOB_SPLIT_DEPLOYMENT_ENABLED, "false")
         .build());
     String expectedCoordinatorStreamConfigStringValue = Util.envVarEscape(SamzaObjectMapper.getObjectMapper()
         .writeValueAsString(CoordinatorStreamUtil.buildCoordinatorStreamConfig(config)));
     Map<String, String> expected = ImmutableMap.of(
         ShellCommandConfig.ENV_COORDINATOR_SYSTEM_CONFIG, expectedCoordinatorStreamConfigStringValue,
         ShellCommandConfig.ENV_JAVA_OPTS, Util.envVarEscape(amJvmOptions),
-        ShellCommandConfig.ENV_CLUSTER_BASED_JOB_COORDINATOR_DEPENDENCY_ISOLATION_ENABLED, "false");
+        ShellCommandConfig.ENV_SPLIT_DEPLOYMENT_ENABLED, "false");
     assertEquals(expected, JavaConverters.mapAsJavaMapConverter(
         YarnJob$.MODULE$.buildEnvironment(config, new YarnConfig(config), new JobConfig(config))).asJava());
   }
@@ -78,14 +77,14 @@
         .put(JobConfig.JOB_ID, "jobId")
         .put(JobConfig.JOB_COORDINATOR_SYSTEM, "jobCoordinatorSystem")
         .put(YarnConfig.AM_JVM_OPTIONS, "")
-        .put(JobConfig.CLUSTER_BASED_JOB_COORDINATOR_DEPENDENCY_ISOLATION_ENABLED, "true")
+        .put(JobConfig.JOB_SPLIT_DEPLOYMENT_ENABLED, "true")
         .build());
     String expectedCoordinatorStreamConfigStringValue = Util.envVarEscape(SamzaObjectMapper.getObjectMapper()
         .writeValueAsString(CoordinatorStreamUtil.buildCoordinatorStreamConfig(config)));
     Map<String, String> expected = ImmutableMap.of(
         ShellCommandConfig.ENV_COORDINATOR_SYSTEM_CONFIG, expectedCoordinatorStreamConfigStringValue,
         ShellCommandConfig.ENV_JAVA_OPTS, "",
-        ShellCommandConfig.ENV_CLUSTER_BASED_JOB_COORDINATOR_DEPENDENCY_ISOLATION_ENABLED, "true",
+        ShellCommandConfig.ENV_SPLIT_DEPLOYMENT_ENABLED, "true",
         ShellCommandConfig.ENV_APPLICATION_LIB_DIR, "./__package/lib");
     assertEquals(expected, JavaConverters.mapAsJavaMapConverter(
         YarnJob$.MODULE$.buildEnvironment(config, new YarnConfig(config), new JobConfig(config))).asJava());
@@ -98,7 +97,7 @@
         .put(JobConfig.JOB_ID, "jobId")
         .put(JobConfig.JOB_COORDINATOR_SYSTEM, "jobCoordinatorSystem")
         .put(YarnConfig.AM_JVM_OPTIONS, "")
-        .put(JobConfig.CLUSTER_BASED_JOB_COORDINATOR_DEPENDENCY_ISOLATION_ENABLED, "false")
+        .put(JobConfig.JOB_SPLIT_DEPLOYMENT_ENABLED, "false")
         .put(YarnConfig.AM_JAVA_HOME, "/some/path/to/java/home")
         .build());
     String expectedCoordinatorStreamConfigStringValue = Util.envVarEscape(SamzaObjectMapper.getObjectMapper()
@@ -106,7 +105,7 @@
     Map<String, String> expected = ImmutableMap.of(
         ShellCommandConfig.ENV_COORDINATOR_SYSTEM_CONFIG, expectedCoordinatorStreamConfigStringValue,
         ShellCommandConfig.ENV_JAVA_OPTS, "",
-        ShellCommandConfig.ENV_CLUSTER_BASED_JOB_COORDINATOR_DEPENDENCY_ISOLATION_ENABLED, "false",
+        ShellCommandConfig.ENV_SPLIT_DEPLOYMENT_ENABLED, "false",
         ShellCommandConfig.ENV_JAVA_HOME, "/some/path/to/java/home");
     assertEquals(expected, JavaConverters.mapAsJavaMapConverter(
         YarnJob$.MODULE$.buildEnvironment(config, new YarnConfig(config), new JobConfig(config))).asJava());
@@ -119,14 +118,14 @@
         .put(JobConfig.JOB_ID, "jobId")
         .put(JobConfig.CONFIG_LOADER_FACTORY, "org.apache.samza.config.loaders.PropertiesConfigLoaderFactory")
         .put(YarnConfig.AM_JVM_OPTIONS, "")
-        .put(JobConfig.CLUSTER_BASED_JOB_COORDINATOR_DEPENDENCY_ISOLATION_ENABLED, "true")
+        .put(JobConfig.JOB_SPLIT_DEPLOYMENT_ENABLED, "true")
         .build());
     String expectedSubmissionConfig = Util.envVarEscape(SamzaObjectMapper.getObjectMapper()
         .writeValueAsString(config));
     Map<String, String> expected = ImmutableMap.of(
         ShellCommandConfig.ENV_SUBMISSION_CONFIG, expectedSubmissionConfig,
         ShellCommandConfig.ENV_JAVA_OPTS, "",
-        ShellCommandConfig.ENV_CLUSTER_BASED_JOB_COORDINATOR_DEPENDENCY_ISOLATION_ENABLED, "true",
+        ShellCommandConfig.ENV_SPLIT_DEPLOYMENT_ENABLED, "true",
         ShellCommandConfig.ENV_APPLICATION_LIB_DIR, "./__package/lib");
     assertEquals(expected, JavaConverters.mapAsJavaMapConverter(
         YarnJob$.MODULE$.buildEnvironment(config, new YarnConfig(config), new JobConfig(config))).asJava());
diff --git a/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestYarnJobFactory.java b/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestYarnJobFactory.java
index 12d45f5..9ba6232 100644
--- a/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestYarnJobFactory.java
+++ b/samza-yarn/src/test/java/org/apache/samza/job/yarn/TestYarnJobFactory.java
@@ -40,7 +40,7 @@
     YarnJobFactory jobFactory = new YarnJobFactory();
     YarnJob yarnJob = jobFactory.getJob(new MapConfig(ImmutableMap.of(
         "fs.http.impl", "org.apache.myHttp",
-        "fs.myscheme.impl","org.apache.myScheme")));
+        "fs.myscheme.impl", "org.apache.myScheme")));
     Configuration hConfig = yarnJob.client().yarnClient().getConfig();
     assertEquals("org.apache.myHttp", hConfig.get("fs.http.impl"));
     assertEquals("org.apache.myScheme", hConfig.get("fs.myscheme.impl"));
@@ -50,8 +50,8 @@
   public void  testGetJobWithFsImplSubkeys() {
     YarnJobFactory jobFactory = new YarnJobFactory();
     YarnJob yarnJob = jobFactory.getJob(new MapConfig(ImmutableMap.of(
-        "fs.myscheme.impl","org.apache.myScheme",
-        "fs.myscheme.impl.client","org.apache.mySchemeClient")));
+        "fs.myscheme.impl", "org.apache.myScheme",
+        "fs.myscheme.impl.client", "org.apache.mySchemeClient")));
     Configuration hConfig = yarnJob.client().yarnClient().getConfig();
     assertEquals("org.apache.myScheme", hConfig.get("fs.myscheme.impl"));
     assertEquals("org.apache.mySchemeClient", hConfig.get("fs.myscheme.impl.client"));
diff --git a/samza-yarn/src/test/java/org/apache/samza/job/yarn/util/MockContainerListener.java b/samza-yarn/src/test/java/org/apache/samza/job/yarn/util/MockContainerListener.java
index 43bda8f..69c9745 100644
--- a/samza-yarn/src/test/java/org/apache/samza/job/yarn/util/MockContainerListener.java
+++ b/samza-yarn/src/test/java/org/apache/samza/job/yarn/util/MockContainerListener.java
@@ -21,7 +21,6 @@
 
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
-import org.apache.hadoop.yarn.api.records.Container;
 
 import static org.junit.Assert.assertTrue;
 
diff --git a/samza-yarn/src/test/java/org/apache/samza/job/yarn/util/MockHttpServer.java b/samza-yarn/src/test/java/org/apache/samza/job/yarn/util/MockHttpServer.java
index a6a31d7..7435f33 100644
--- a/samza-yarn/src/test/java/org/apache/samza/job/yarn/util/MockHttpServer.java
+++ b/samza-yarn/src/test/java/org/apache/samza/job/yarn/util/MockHttpServer.java
@@ -43,7 +43,7 @@
 
   @Override
   public URL getUrl() {
-    if(running()) {
+    if (running()) {
       try {
         return new URL("http://localhost:12345/");
       } catch (MalformedURLException mue) {
diff --git a/samza-yarn/src/test/java/org/apache/samza/job/yarn/util/hadoop/TestHttpFileSystem.java b/samza-yarn/src/test/java/org/apache/samza/job/yarn/util/hadoop/TestHttpFileSystem.java
index 6f42856..53f8c40 100644
--- a/samza-yarn/src/test/java/org/apache/samza/job/yarn/util/hadoop/TestHttpFileSystem.java
+++ b/samza-yarn/src/test/java/org/apache/samza/job/yarn/util/hadoop/TestHttpFileSystem.java
@@ -85,13 +85,13 @@
 
           //Hang the connection until the read timeout expires on the client side.
           if (numBytesWritten >= THRESHOLD_BYTES) {
-            if(!serverWaitLatch.await(5, TimeUnit.SECONDS)) {
+            if (!serverWaitLatch.await(5, TimeUnit.SECONDS)) {
               throw new IOException("Timed out waiting for latch");
             }
             break;
           }
         }
-      } catch(Exception e) {
+      } catch (Exception e) {
         //Record any exception that may have occurred
         LOG.error("{}", e);
         serverException = e;
@@ -129,10 +129,10 @@
         while (in.read() >= 0) {
           totalBytesRead++;
         }
-      } catch(SocketTimeoutException e) {
+      } catch (SocketTimeoutException e) {
         //Expect the socket to timeout after THRESHOLD bytes have been read.
         serverWaitLatch.countDown();
-      } catch(Exception e) {
+      } catch (Exception e) {
         //Record any exception that may have occurred.
         LOG.error("{}", e);
         clientException = e;
diff --git a/samza-yarn/src/test/java/org/apache/samza/validation/MockMetricsValidator.java b/samza-yarn/src/test/java/org/apache/samza/validation/MockMetricsValidator.java
index c3cf935..de0a980 100644
--- a/samza-yarn/src/test/java/org/apache/samza/validation/MockMetricsValidator.java
+++ b/samza-yarn/src/test/java/org/apache/samza/validation/MockMetricsValidator.java
@@ -36,9 +36,11 @@
   @Override
   public void validate(MetricsAccessor accessor) throws MetricsValidationFailureException {
     Map<String, Long> commitCalls = accessor.getCounterValues(SamzaContainerMetrics.class.getName(), "commit-calls");
-    if(commitCalls.isEmpty()) throw new MetricsValidationFailureException("no value");
-    for(Map.Entry<String, Long> entry: commitCalls.entrySet()) {
-      if(entry.getValue() <= 0) {
+    if (commitCalls.isEmpty()) {
+      throw new MetricsValidationFailureException("no value");
+    }
+    for (Map.Entry<String, Long> entry : commitCalls.entrySet()) {
+      if (entry.getValue() <= 0) {
         throw new MetricsValidationFailureException("commit call <= 0");
       }
     }
diff --git a/samza-yarn/src/test/java/org/apache/samza/webapp/TestApplicationMasterRestClient.java b/samza-yarn/src/test/java/org/apache/samza/webapp/TestApplicationMasterRestClient.java
index 0dbbed0..338b455 100644
--- a/samza-yarn/src/test/java/org/apache/samza/webapp/TestApplicationMasterRestClient.java
+++ b/samza-yarn/src/test/java/org/apache/samza/webapp/TestApplicationMasterRestClient.java
@@ -300,7 +300,7 @@
       containers.put(yarnContainerId, containerMap);
     });
 
-    return jsonMapper.readValue(jsonMapper.writeValueAsString(containers), new TypeReference<Map<String, Map<String, Object>>>() {});
+    return jsonMapper.readValue(jsonMapper.writeValueAsString(containers), new TypeReference<Map<String, Map<String, Object>>>() { });
   }
 
   private void assignMetricValues(SamzaApplicationState samzaAppState, MetricsRegistryMap registry) {
diff --git a/samza-yarn/src/test/java/org/apache/samza/webapp/TestYarnContainerHeartbeatServlet.java b/samza-yarn/src/test/java/org/apache/samza/webapp/TestYarnContainerHeartbeatServlet.java
index f7ec995..8987834 100644
--- a/samza-yarn/src/test/java/org/apache/samza/webapp/TestYarnContainerHeartbeatServlet.java
+++ b/samza-yarn/src/test/java/org/apache/samza/webapp/TestYarnContainerHeartbeatServlet.java
@@ -73,10 +73,10 @@
   @Test
   public void testContainerHeartbeatWhenValid()
       throws IOException {
-    String VALID_CONTAINER_ID = "container_1350670447861_0003_01_000002";
-    when(container.id()).thenReturn(ConverterUtils.toContainerId(VALID_CONTAINER_ID));
-    yarnAppState.runningProcessors.put(VALID_CONTAINER_ID, container);
-    URL url = new URL(webApp.getUrl().toString() + "containerHeartbeat?executionContainerId=" + VALID_CONTAINER_ID);
+    String validContainerId = "container_1350670447861_0003_01_000002";
+    when(container.id()).thenReturn(ConverterUtils.toContainerId(validContainerId));
+    yarnAppState.runningProcessors.put(validContainerId, container);
+    URL url = new URL(webApp.getUrl().toString() + "containerHeartbeat?executionContainerId=" + validContainerId);
     String response = HttpUtil.read(url, 1000, new ExponentialSleepStrategy());
     heartbeat = mapper.readValue(response, ContainerHeartbeatResponse.class);
     Assert.assertTrue(heartbeat.isAlive());
@@ -85,11 +85,11 @@
   @Test
   public void testContainerHeartbeatWhenInvalid()
       throws IOException {
-    String VALID_CONTAINER_ID = "container_1350670447861_0003_01_000003";
-    String INVALID_CONTAINER_ID = "container_1350670447861_0003_01_000002";
-    when(container.id()).thenReturn(ConverterUtils.toContainerId(VALID_CONTAINER_ID));
-    yarnAppState.runningProcessors.put(VALID_CONTAINER_ID, container);
-    URL url = new URL(webApp.getUrl().toString() + "containerHeartbeat?executionContainerId=" + INVALID_CONTAINER_ID);
+    String validContainerId = "container_1350670447861_0003_01_000003";
+    String invalidContainerId = "container_1350670447861_0003_01_000002";
+    when(container.id()).thenReturn(ConverterUtils.toContainerId(validContainerId));
+    yarnAppState.runningProcessors.put(validContainerId, container);
+    URL url = new URL(webApp.getUrl().toString() + "containerHeartbeat?executionContainerId=" + invalidContainerId);
     String response = HttpUtil.read(url, 1000, new ExponentialSleepStrategy());
     heartbeat = mapper.readValue(response, ContainerHeartbeatResponse.class);
     Assert.assertFalse(heartbeat.isAlive());