APEXMALHAR-2517 imports/checkstyle #662
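This PR migrates the remaining `apps` and `benchmark` sources off the legacy `com.datatorrent.*` namespace: library and contrib classes are now imported from `org.apache.apex.malhar.*`, the benchmark apps themselves move to `org.apache.apex.benchmark.*`, imports are regrouped to satisfy checkstyle, and the parent/module versions are bumped from 3.9.0-SNAPSHOT to 4.0.0-SNAPSHOT. As far as this diff shows, the change for downstream code is purely a package rename; a minimal sketch (hypothetical class, with only the `KeyValPair` rename taken from this diff):

```java
// Hypothetical downstream class illustrating the rename this PR applies.
// Old import, removed throughout this diff:
//   import com.datatorrent.lib.util.KeyValPair;
// New location under the org.apache.apex.malhar namespace:
import org.apache.apex.malhar.lib.util.KeyValPair;

public class MigrationSketch
{
  // KeyValPair's API is unchanged; only its package moved.
  public KeyValPair<String, Long> makePair()
  {
    return new KeyValPair<>("tuples", 42L);
  }
}
```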
diff --git a/apps/filecopy/pom.xml b/apps/filecopy/pom.xml
index d6a0ccd..69b2e95 100644
--- a/apps/filecopy/pom.xml
+++ b/apps/filecopy/pom.xml
@@ -24,7 +24,7 @@
   <parent>
     <artifactId>malhar-apps</artifactId>
     <groupId>org.apache.apex</groupId>
-    <version>3.9.0-SNAPSHOT</version>
+    <version>4.0.0-SNAPSHOT</version>
   </parent>
 
   <artifactId>filecopy</artifactId>
@@ -79,7 +79,7 @@
     <dependency>
       <groupId>org.apache.apex</groupId>
       <artifactId>malhar-library</artifactId>
-      <version>3.9.0-SNAPSHOT</version>
+      <version>4.0.0-SNAPSHOT</version>
       <exclusions>
         <exclusion>
           <groupId>*</groupId>
diff --git a/apps/filecopy/src/main/java/com/datatorrent/apps/copy/HDFSFileCopyApp.java b/apps/filecopy/src/main/java/com/datatorrent/apps/copy/HDFSFileCopyApp.java
index 2688f1c..bb1e1d9 100644
--- a/apps/filecopy/src/main/java/com/datatorrent/apps/copy/HDFSFileCopyApp.java
+++ b/apps/filecopy/src/main/java/com/datatorrent/apps/copy/HDFSFileCopyApp.java
@@ -19,14 +19,14 @@
 
 package com.datatorrent.apps.copy;
 
+import org.apache.apex.malhar.lib.io.fs.FSInputModule;
+import org.apache.apex.malhar.lib.io.fs.HDFSFileCopyModule;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.DAG;
 import com.datatorrent.api.DAG.Locality;
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.lib.io.fs.FSInputModule;
-import com.datatorrent.lib.io.fs.HDFSFileCopyModule;
 
 /**
  * Application for HDFS to HDFS file copy
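The filecopy app only needed its two module imports repointed. For orientation, a minimal sketch of how such an application is assembled with the migrated imports (hypothetical class; the real `HDFSFileCopyApp` wiring, including the `dag.addStream(...)` connections between module ports, lies outside this hunk):

```java
import org.apache.apex.malhar.lib.io.fs.FSInputModule;
import org.apache.apex.malhar.lib.io.fs.HDFSFileCopyModule;
import org.apache.hadoop.conf.Configuration;

import com.datatorrent.api.DAG;
import com.datatorrent.api.StreamingApplication;
import com.datatorrent.api.annotation.ApplicationAnnotation;

@ApplicationAnnotation(name = "HDFSFileCopySketch")
public class HDFSFileCopySketch implements StreamingApplication
{
  @Override
  public void populateDAG(DAG dag, Configuration conf)
  {
    // Modules are added by name; their file/block metadata ports would be
    // connected with dag.addStream(...) calls, elided here because the
    // port names are not shown in this diff.
    FSInputModule input = dag.addModule("HDFSInputModule", new FSInputModule());
    HDFSFileCopyModule copy = dag.addModule("HDFSFileCopyModule", new HDFSFileCopyModule());
  }
}
```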
diff --git a/apps/logstream/pom.xml b/apps/logstream/pom.xml
index 07a5eee..13cc865 100644
--- a/apps/logstream/pom.xml
+++ b/apps/logstream/pom.xml
@@ -24,7 +24,7 @@
   <parent>
     <artifactId>malhar-apps</artifactId>
     <groupId>org.apache.apex</groupId>
-    <version>3.9.0-SNAPSHOT</version>
+    <version>4.0.0-SNAPSHOT</version>
   </parent>
 
   <groupId>org.apache.apex</groupId>
diff --git a/apps/logstream/src/main/java/com/datatorrent/apps/logstream/AggregationsToRedisOperator.java b/apps/logstream/src/main/java/com/datatorrent/apps/logstream/AggregationsToRedisOperator.java
index 495987e..74997f4 100644
--- a/apps/logstream/src/main/java/com/datatorrent/apps/logstream/AggregationsToRedisOperator.java
+++ b/apps/logstream/src/main/java/com/datatorrent/apps/logstream/AggregationsToRedisOperator.java
@@ -22,8 +22,8 @@
 import java.util.HashMap;
 import java.util.Map;
 
-import com.datatorrent.lib.logs.DimensionObject;
-import com.datatorrent.lib.util.KeyValPair;
+import org.apache.apex.malhar.lib.logs.DimensionObject;
+import org.apache.apex.malhar.lib.util.KeyValPair;
 
 import com.datatorrent.common.util.BaseOperator;
 import com.datatorrent.api.DefaultInputPort;
diff --git a/apps/logstream/src/main/java/com/datatorrent/apps/logstream/Application.java b/apps/logstream/src/main/java/com/datatorrent/apps/logstream/Application.java
index 82c9214..e3858b4 100644
--- a/apps/logstream/src/main/java/com/datatorrent/apps/logstream/Application.java
+++ b/apps/logstream/src/main/java/com/datatorrent/apps/logstream/Application.java
@@ -28,25 +28,26 @@
 import org.apache.apex.malhar.contrib.misc.streamquery.SelectOperator;
 import org.apache.apex.malhar.contrib.misc.streamquery.condition.EqualValueCondition;
 import org.apache.apex.malhar.lib.utils.PubSubHelper;
+import org.apache.apex.malhar.contrib.redis.RedisKeyValPairOutputOperator;
+import org.apache.apex.malhar.contrib.redis.RedisMapOutputOperator;
+import org.apache.apex.malhar.contrib.redis.RedisNumberSummationMapOutputOperator;
+import org.apache.apex.malhar.lib.algo.TopN;
+import org.apache.apex.malhar.lib.io.ConsoleOutputOperator;
+import org.apache.apex.malhar.lib.io.PubSubWebSocketOutputOperator;
+import org.apache.apex.malhar.lib.logs.DimensionObject;
+import org.apache.apex.malhar.lib.logs.MultiWindowDimensionAggregation;
+import org.apache.apex.malhar.lib.logs.MultiWindowDimensionAggregation.AggregateOperation;
+import org.apache.apex.malhar.lib.stream.Counter;
+import org.apache.apex.malhar.lib.stream.JsonByteArrayOperator;
+import org.apache.apex.malhar.lib.streamquery.index.ColumnIndex;
+import org.apache.apex.malhar.lib.util.AbstractDimensionTimeBucketOperator;
+import org.apache.apex.malhar.lib.util.DimensionTimeBucketSumOperator;
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.DAG;
 import com.datatorrent.api.Operator.InputPort;
 import com.datatorrent.api.StreamingApplication;
-import com.datatorrent.contrib.redis.RedisKeyValPairOutputOperator;
-import com.datatorrent.contrib.redis.RedisMapOutputOperator;
-import com.datatorrent.contrib.redis.RedisNumberSummationMapOutputOperator;
-import com.datatorrent.lib.algo.TopN;
-import com.datatorrent.lib.io.ConsoleOutputOperator;
-import com.datatorrent.lib.io.PubSubWebSocketOutputOperator;
-import com.datatorrent.lib.logs.DimensionObject;
-import com.datatorrent.lib.logs.MultiWindowDimensionAggregation;
-import com.datatorrent.lib.logs.MultiWindowDimensionAggregation.AggregateOperation;
-import com.datatorrent.lib.stream.Counter;
-import com.datatorrent.lib.stream.JsonByteArrayOperator;
-import com.datatorrent.lib.streamquery.index.ColumnIndex;
-import com.datatorrent.lib.util.AbstractDimensionTimeBucketOperator;
-import com.datatorrent.lib.util.DimensionTimeBucketSumOperator;
 
 /**
  * Log stream processing application based on Apex platform.<br>
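Most of the churn in Application.java is the import block itself. Across the PR the reordered hunks consistently group imports as: JDK (`java.*`/`javax.*`), other third-party (`org.slf4j`, kafka, etc.), `org.apache.*`, then the `com.datatorrent.*` API, with blank lines between groups. A compile-ready sketch of that layout (hypothetical class; the grouping is inferred from the hunks, not quoted from the checkstyle configuration):

```java
import java.util.HashMap;
import java.util.Map;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.apex.malhar.lib.util.KeyValPair;
import org.apache.hadoop.conf.Configuration;

import com.datatorrent.api.DAG;
import com.datatorrent.api.StreamingApplication;

public class ImportOrderSketch implements StreamingApplication
{
  private static final Logger LOG = LoggerFactory.getLogger(ImportOrderSketch.class);
  private final Map<String, KeyValPair<String, Long>> cache = new HashMap<>();

  @Override
  public void populateDAG(DAG dag, Configuration conf)
  {
    LOG.debug("cache size {}", cache.size());
  }
}
```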
diff --git a/apps/logstream/src/main/java/com/datatorrent/apps/logstream/Application1.java b/apps/logstream/src/main/java/com/datatorrent/apps/logstream/Application1.java
index 333b877..a458b3c 100644
--- a/apps/logstream/src/main/java/com/datatorrent/apps/logstream/Application1.java
+++ b/apps/logstream/src/main/java/com/datatorrent/apps/logstream/Application1.java
@@ -18,11 +18,10 @@
  */
 package com.datatorrent.apps.logstream;
 
+import org.apache.apex.malhar.lib.io.ConsoleOutputOperator;
+import org.apache.apex.malhar.lib.stream.JsonByteArrayOperator;
 import org.apache.hadoop.conf.Configuration;
 
-import com.datatorrent.lib.io.ConsoleOutputOperator;
-import com.datatorrent.lib.stream.JsonByteArrayOperator;
-
 import com.datatorrent.api.Context.PortContext;
 import com.datatorrent.api.DAG;
 import com.datatorrent.api.StreamingApplication;
diff --git a/apps/logstream/src/main/java/com/datatorrent/apps/logstream/DimensionOperator.java b/apps/logstream/src/main/java/com/datatorrent/apps/logstream/DimensionOperator.java
index 2650795..de96781 100644
--- a/apps/logstream/src/main/java/com/datatorrent/apps/logstream/DimensionOperator.java
+++ b/apps/logstream/src/main/java/com/datatorrent/apps/logstream/DimensionOperator.java
@@ -28,10 +28,11 @@
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
+import org.apache.apex.malhar.lib.codec.KryoSerializableStreamCodec;
+import org.apache.apex.malhar.lib.logs.DimensionObject;
 import org.apache.commons.lang.mutable.MutableDouble;
 
-import com.datatorrent.lib.codec.KryoSerializableStreamCodec;
-import com.datatorrent.lib.logs.DimensionObject;
 import com.datatorrent.api.*;
 import com.datatorrent.api.Context.OperatorContext;
 import com.datatorrent.api.Partitioner.PartitioningContext;
diff --git a/apps/logstream/src/main/java/com/datatorrent/apps/logstream/DimensionOperatorUnifier.java b/apps/logstream/src/main/java/com/datatorrent/apps/logstream/DimensionOperatorUnifier.java
index e495ee6..606edb7 100644
--- a/apps/logstream/src/main/java/com/datatorrent/apps/logstream/DimensionOperatorUnifier.java
+++ b/apps/logstream/src/main/java/com/datatorrent/apps/logstream/DimensionOperatorUnifier.java
@@ -26,10 +26,9 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.lib.logs.DimensionObject;
 import org.apache.commons.lang.mutable.MutableDouble;
 
-import com.datatorrent.lib.logs.DimensionObject;
-
 import com.datatorrent.api.Context.OperatorContext;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.Operator.Unifier;
diff --git a/apps/logstream/src/main/java/com/datatorrent/apps/logstream/LogScoreOperator.java b/apps/logstream/src/main/java/com/datatorrent/apps/logstream/LogScoreOperator.java
index 57c558b..74dc3a6 100644
--- a/apps/logstream/src/main/java/com/datatorrent/apps/logstream/LogScoreOperator.java
+++ b/apps/logstream/src/main/java/com/datatorrent/apps/logstream/LogScoreOperator.java
@@ -20,10 +20,12 @@
 
 import com.datatorrent.common.util.BaseOperator;
 import com.datatorrent.api.DefaultInputPort;
-import com.datatorrent.lib.logs.DimensionObject;
+
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.apex.malhar.lib.logs.DimensionObject;
+
 /**
  * Log Score
  *
diff --git a/apps/logstream/src/main/java/com/datatorrent/apps/logstream/LogstreamTopN.java b/apps/logstream/src/main/java/com/datatorrent/apps/logstream/LogstreamTopN.java
index a09e64f..8df62fa 100644
--- a/apps/logstream/src/main/java/com/datatorrent/apps/logstream/LogstreamTopN.java
+++ b/apps/logstream/src/main/java/com/datatorrent/apps/logstream/LogstreamTopN.java
@@ -28,9 +28,9 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.datatorrent.lib.algo.TopN;
-import com.datatorrent.lib.codec.KryoSerializableStreamCodec;
-import com.datatorrent.lib.logs.DimensionObject;
+import org.apache.apex.malhar.lib.algo.TopN;
+import org.apache.apex.malhar.lib.codec.KryoSerializableStreamCodec;
+import org.apache.apex.malhar.lib.logs.DimensionObject;
 
 import com.datatorrent.api.Context.OperatorContext;
 import com.datatorrent.api.DefaultPartition;
diff --git a/apps/logstream/src/main/java/com/datatorrent/apps/logstream/LogstreamWidgetOutputOperator.java b/apps/logstream/src/main/java/com/datatorrent/apps/logstream/LogstreamWidgetOutputOperator.java
index 29c92e6..715cb6b 100644
--- a/apps/logstream/src/main/java/com/datatorrent/apps/logstream/LogstreamWidgetOutputOperator.java
+++ b/apps/logstream/src/main/java/com/datatorrent/apps/logstream/LogstreamWidgetOutputOperator.java
@@ -26,11 +26,10 @@
 
 import javax.validation.constraints.NotNull;
 
+import org.apache.apex.malhar.lib.io.WidgetOutputOperator;
+import org.apache.apex.malhar.lib.logs.DimensionObject;
 import org.apache.commons.lang3.tuple.MutablePair;
 
-import com.datatorrent.lib.io.WidgetOutputOperator;
-import com.datatorrent.lib.logs.DimensionObject;
-
 import com.datatorrent.api.Context.OperatorContext;
 import com.datatorrent.api.DefaultInputPort;
 
diff --git a/apps/logstream/src/main/java/com/datatorrent/apps/logstream/RabbitMQLogsInputOperator.java b/apps/logstream/src/main/java/com/datatorrent/apps/logstream/RabbitMQLogsInputOperator.java
index a72dd05..a4e7fd2 100644
--- a/apps/logstream/src/main/java/com/datatorrent/apps/logstream/RabbitMQLogsInputOperator.java
+++ b/apps/logstream/src/main/java/com/datatorrent/apps/logstream/RabbitMQLogsInputOperator.java
@@ -24,13 +24,14 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.contrib.rabbitmq.AbstractSinglePortRabbitMQInputOperator;
+
 import com.datatorrent.api.Context.OperatorContext;
 import com.datatorrent.api.Partitioner.PartitioningContext;
 import com.datatorrent.api.DefaultPartition;
 import com.datatorrent.api.Partitioner;
 import com.datatorrent.apps.logstream.PropertyRegistry.LogstreamPropertyRegistry;
 import com.datatorrent.netlet.util.DTThrowable;
-import com.datatorrent.contrib.rabbitmq.AbstractSinglePortRabbitMQInputOperator;
 
 /**
  *
diff --git a/apps/logstream/src/test/java/com/datatorrent/apps/logstream/DimensionOperatorTest.java b/apps/logstream/src/test/java/com/datatorrent/apps/logstream/DimensionOperatorTest.java
index 1ecd094..6eadc6e 100644
--- a/apps/logstream/src/test/java/com/datatorrent/apps/logstream/DimensionOperatorTest.java
+++ b/apps/logstream/src/test/java/com/datatorrent/apps/logstream/DimensionOperatorTest.java
@@ -26,11 +26,10 @@
 import org.junit.Assert;
 import org.junit.Test;
 
+import org.apache.apex.malhar.lib.logs.DimensionObject;
+import org.apache.apex.malhar.lib.testbench.CollectorTestSink;
 import org.apache.commons.lang.mutable.MutableDouble;
 
-import com.datatorrent.lib.logs.DimensionObject;
-import com.datatorrent.lib.testbench.CollectorTestSink;
-
 import com.datatorrent.apps.logstream.PropertyRegistry.LogstreamPropertyRegistry;
 import com.datatorrent.netlet.util.DTThrowable;
 
diff --git a/apps/logstream/src/test/java/com/datatorrent/apps/logstream/DimensionOperatorUnifierTest.java b/apps/logstream/src/test/java/com/datatorrent/apps/logstream/DimensionOperatorUnifierTest.java
index a835dab..05929e2 100644
--- a/apps/logstream/src/test/java/com/datatorrent/apps/logstream/DimensionOperatorUnifierTest.java
+++ b/apps/logstream/src/test/java/com/datatorrent/apps/logstream/DimensionOperatorUnifierTest.java
@@ -26,11 +26,10 @@
 
 import org.junit.Test;
 
+import org.apache.apex.malhar.lib.logs.DimensionObject;
+import org.apache.apex.malhar.lib.testbench.CollectorTestSink;
 import org.apache.commons.lang.mutable.MutableDouble;
 
-import com.datatorrent.lib.logs.DimensionObject;
-import com.datatorrent.lib.testbench.CollectorTestSink;
-
 /**
  *
  * Tests logstream dimension operator unifier.
diff --git a/apps/logstream/src/test/java/com/datatorrent/apps/logstream/FilterOperatorTest.java b/apps/logstream/src/test/java/com/datatorrent/apps/logstream/FilterOperatorTest.java
index 2b7a5bd..a2bc229 100644
--- a/apps/logstream/src/test/java/com/datatorrent/apps/logstream/FilterOperatorTest.java
+++ b/apps/logstream/src/test/java/com/datatorrent/apps/logstream/FilterOperatorTest.java
@@ -23,7 +23,7 @@
 
 import org.junit.Test;
 
-import com.datatorrent.lib.testbench.CollectorTestSink;
+import org.apache.apex.malhar.lib.testbench.CollectorTestSink;
 
 import com.datatorrent.apps.logstream.PropertyRegistry.LogstreamPropertyRegistry;
 
diff --git a/apps/logstream/src/test/java/com/datatorrent/apps/logstream/LogstreamTopNTest.java b/apps/logstream/src/test/java/com/datatorrent/apps/logstream/LogstreamTopNTest.java
index 69d0567..d83373c 100644
--- a/apps/logstream/src/test/java/com/datatorrent/apps/logstream/LogstreamTopNTest.java
+++ b/apps/logstream/src/test/java/com/datatorrent/apps/logstream/LogstreamTopNTest.java
@@ -26,11 +26,10 @@
 import org.junit.Assert;
 import org.junit.Test;
 
+import org.apache.apex.malhar.lib.logs.DimensionObject;
+import org.apache.apex.malhar.lib.testbench.CollectorTestSink;
 import org.apache.commons.lang.mutable.MutableDouble;
 
-import com.datatorrent.lib.logs.DimensionObject;
-import com.datatorrent.lib.testbench.CollectorTestSink;
-
 import com.datatorrent.apps.logstream.PropertyRegistry.LogstreamPropertyRegistry;
 
 /**
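The logstream tests pick up the same renames, most commonly `CollectorTestSink`. A self-contained sketch of the test pattern these files rely on (hypothetical emitter and test; the raw `Sink` cast mirrors the usual Malhar test idiom for `DefaultOutputPort.setSink`):

```java
import org.junit.Assert;
import org.junit.Test;

import org.apache.apex.malhar.lib.testbench.CollectorTestSink;

import com.datatorrent.api.DefaultOutputPort;
import com.datatorrent.api.Sink;

public class CollectorTestSinkSketchTest
{
  // Trivial port holder standing in for an operator under test.
  static class Emitter
  {
    final DefaultOutputPort<String> out = new DefaultOutputPort<>();
  }

  @Test
  @SuppressWarnings({"unchecked", "rawtypes"})
  public void sinkCollectsEmittedTuples()
  {
    Emitter emitter = new Emitter();
    CollectorTestSink<String> sink = new CollectorTestSink<>();
    emitter.out.setSink((Sink)sink);

    emitter.out.emit("a");
    emitter.out.emit("b");

    Assert.assertEquals(2, sink.collectedTuples.size());
  }
}
```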
diff --git a/apps/pom.xml b/apps/pom.xml
index 679d623..1f46ecd 100644
--- a/apps/pom.xml
+++ b/apps/pom.xml
@@ -25,7 +25,7 @@
   <parent>
     <groupId>org.apache.apex</groupId>
     <artifactId>malhar</artifactId>
-    <version>3.9.0-SNAPSHOT</version>
+    <version>4.0.0-SNAPSHOT</version>
   </parent>
 
   <artifactId>malhar-apps</artifactId>
diff --git a/benchmark/pom.xml b/benchmark/pom.xml
index 0822549..1b921d7 100644
--- a/benchmark/pom.xml
+++ b/benchmark/pom.xml
@@ -24,7 +24,7 @@
   <parent>
     <artifactId>malhar</artifactId>
     <groupId>org.apache.apex</groupId>
-    <version>3.9.0-SNAPSHOT</version>
+    <version>4.0.0-SNAPSHOT</version>
   </parent>
 
   <groupId>org.apache.apex</groupId>
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/ApplicationFixed.java b/benchmark/src/main/java/org/apache/apex/benchmark/ApplicationFixed.java
index aa10eea..1491f47 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/ApplicationFixed.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/ApplicationFixed.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark;
+package org.apache.apex.benchmark;
 
 import org.apache.hadoop.conf.Configuration;
 
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/Benchmark.java b/benchmark/src/main/java/org/apache/apex/benchmark/Benchmark.java
index d8d51b8..6992bd8 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/Benchmark.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/Benchmark.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark;
+package org.apache.apex.benchmark;
 
 import org.apache.hadoop.conf.Configuration;
 
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/CouchBaseAppInput.java b/benchmark/src/main/java/org/apache/apex/benchmark/CouchBaseAppInput.java
index bf5b876..6e29652 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/CouchBaseAppInput.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/CouchBaseAppInput.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark;
+package org.apache.apex.benchmark;
 
 import org.apache.hadoop.conf.Configuration;
 
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/CouchBaseAppOutput.java b/benchmark/src/main/java/org/apache/apex/benchmark/CouchBaseAppOutput.java
index 4f12791..95eaacc 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/CouchBaseAppOutput.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/CouchBaseAppOutput.java
@@ -16,8 +16,9 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark;
+package org.apache.apex.benchmark;
 
+import org.apache.apex.malhar.lib.testbench.RandomEventGenerator;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.DAG;
@@ -25,7 +26,6 @@
 
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.lib.testbench.RandomEventGenerator;
 
 /**
  *
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/CouchBaseInputOperator.java b/benchmark/src/main/java/org/apache/apex/benchmark/CouchBaseInputOperator.java
index 8ae0a94..12e43e2 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/CouchBaseInputOperator.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/CouchBaseInputOperator.java
@@ -16,15 +16,15 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark;
+package org.apache.apex.benchmark;
 
 import java.util.ArrayList;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.datatorrent.contrib.couchbase.AbstractCouchBaseInputOperator;
-import com.datatorrent.contrib.couchbase.CouchBaseWindowStore;
+import org.apache.apex.malhar.contrib.couchbase.AbstractCouchBaseInputOperator;
+import org.apache.apex.malhar.contrib.couchbase.CouchBaseWindowStore;
 
 /**
  * <p>CouchBaseInputOperator class.</p>
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/CouchBaseOutputOperator.java b/benchmark/src/main/java/org/apache/apex/benchmark/CouchBaseOutputOperator.java
index 8b6fc49..43a8e91 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/CouchBaseOutputOperator.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/CouchBaseOutputOperator.java
@@ -16,9 +16,9 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark;
+package org.apache.apex.benchmark;
 
-import com.datatorrent.contrib.couchbase.AbstractCouchBaseSetOperator;
+import org.apache.apex.malhar.contrib.couchbase.AbstractCouchBaseSetOperator;
 
 /**
  * <p>CouchBaseOutputOperator class.</p>
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/FixedTuplesInputOperator.java b/benchmark/src/main/java/org/apache/apex/benchmark/FixedTuplesInputOperator.java
index f2582bd..c3248bf 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/FixedTuplesInputOperator.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/FixedTuplesInputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark;
+package org.apache.apex.benchmark;
 
 import java.util.ArrayList;
 
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/RandomMapOutput.java b/benchmark/src/main/java/org/apache/apex/benchmark/RandomMapOutput.java
index 3342771..d5af7a5 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/RandomMapOutput.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/RandomMapOutput.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark;
+package org.apache.apex.benchmark;
 
 import java.util.HashMap;
 
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/RandomWordInputModule.java b/benchmark/src/main/java/org/apache/apex/benchmark/RandomWordInputModule.java
index 7d02de2..7224df3 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/RandomWordInputModule.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/RandomWordInputModule.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark;
+package org.apache.apex.benchmark;
 
 import javax.validation.constraints.Min;
 
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/WordCountOperator.java b/benchmark/src/main/java/org/apache/apex/benchmark/WordCountOperator.java
index 8c55404..1138e84 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/WordCountOperator.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/WordCountOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark;
+package org.apache.apex.benchmark;
 
 /*
  * To change this template, choose Tools | Templates and open the template in the editor.
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/aerospike/AerospikeOutputBenchmarkApplication.java b/benchmark/src/main/java/org/apache/apex/benchmark/aerospike/AerospikeOutputBenchmarkApplication.java
index 0a880fd..3282e32 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/aerospike/AerospikeOutputBenchmarkApplication.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/aerospike/AerospikeOutputBenchmarkApplication.java
@@ -16,16 +16,16 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.aerospike;
+package org.apache.apex.benchmark.aerospike;
 
+import org.apache.apex.malhar.contrib.aerospike.AerospikeTransactionalStore;
+import org.apache.apex.malhar.lib.testbench.RandomEventGenerator;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.DAG;
 import com.datatorrent.api.DAG.Locality;
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.contrib.aerospike.AerospikeTransactionalStore;
-import com.datatorrent.lib.testbench.RandomEventGenerator;
 
 /**
  * Application to benchmark the performance of aerospike output operator.
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/aerospike/AerospikeOutputOperator.java b/benchmark/src/main/java/org/apache/apex/benchmark/aerospike/AerospikeOutputOperator.java
index f9ee689..bb3661b 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/aerospike/AerospikeOutputOperator.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/aerospike/AerospikeOutputOperator.java
@@ -16,16 +16,16 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.aerospike;
+package org.apache.apex.benchmark.aerospike;
 
 import java.util.List;
 
+import org.apache.apex.malhar.contrib.aerospike.AbstractAerospikeTransactionalPutOperator;
+
 import com.aerospike.client.AerospikeException;
 import com.aerospike.client.Bin;
 import com.aerospike.client.Key;
 
-import com.datatorrent.contrib.aerospike.AbstractAerospikeTransactionalPutOperator;
-
 /**
  * <p>AerospikeOutputOperator class.</p>
  *
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/algo/UniqueValueCountBenchmarkApplication.java b/benchmark/src/main/java/org/apache/apex/benchmark/algo/UniqueValueCountBenchmarkApplication.java
index f74311e..8e1d8b8 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/algo/UniqueValueCountBenchmarkApplication.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/algo/UniqueValueCountBenchmarkApplication.java
@@ -16,8 +16,13 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.algo;
+package org.apache.apex.benchmark.algo;
 
+import org.apache.apex.malhar.lib.algo.UniqueCounter;
+import org.apache.apex.malhar.lib.converter.MapToKeyHashValuePairConverter;
+import org.apache.apex.malhar.lib.io.ConsoleOutputOperator;
+import org.apache.apex.malhar.lib.stream.Counter;
+import org.apache.apex.malhar.lib.testbench.RandomEventGenerator;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.Context;
@@ -27,13 +32,6 @@
 import com.datatorrent.api.annotation.ApplicationAnnotation;
 import com.datatorrent.common.partitioner.StatelessPartitioner;
 
-import com.datatorrent.lib.algo.UniqueCounter;
-import com.datatorrent.lib.converter.MapToKeyHashValuePairConverter;
-import com.datatorrent.lib.io.ConsoleOutputOperator;
-
-import com.datatorrent.lib.stream.Counter;
-import com.datatorrent.lib.testbench.RandomEventGenerator;
-
 /**
  * Application to demonstrate PartitionableUniqueCount operator. <br>
 * The input operator generates random keys, which are sent to
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/cassandra/CassandraOutputBenchmarkApplication.java b/benchmark/src/main/java/org/apache/apex/benchmark/cassandra/CassandraOutputBenchmarkApplication.java
index 46d503f..6f91cd1 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/cassandra/CassandraOutputBenchmarkApplication.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/cassandra/CassandraOutputBenchmarkApplication.java
@@ -16,8 +16,10 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.cassandra;
+package org.apache.apex.benchmark.cassandra;
 
+import org.apache.apex.malhar.contrib.cassandra.CassandraTransactionalStore;
+import org.apache.apex.malhar.lib.testbench.RandomEventGenerator;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.DAG;
@@ -26,10 +28,6 @@
 
 import com.datatorrent.api.annotation.ApplicationAnnotation;
 
-import com.datatorrent.contrib.cassandra.CassandraTransactionalStore;
-
-import com.datatorrent.lib.testbench.RandomEventGenerator;
-
 /**
  * Application to benchmark the performance of cassandra output operator.
  * The operator was tested on following configuration:
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/cassandra/CassandraOutputOperator.java b/benchmark/src/main/java/org/apache/apex/benchmark/cassandra/CassandraOutputOperator.java
index 592d8a2..90bc44b 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/cassandra/CassandraOutputOperator.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/cassandra/CassandraOutputOperator.java
@@ -16,15 +16,15 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.cassandra;
+package org.apache.apex.benchmark.cassandra;
+
+import org.apache.apex.malhar.contrib.cassandra.AbstractCassandraTransactionableOutputOperator;
 
 import com.datastax.driver.core.BoundStatement;
 import com.datastax.driver.core.PreparedStatement;
 import com.datastax.driver.core.Statement;
 import com.datastax.driver.core.exceptions.DriverException;
 
-import com.datatorrent.contrib.cassandra.AbstractCassandraTransactionableOutputOperator;
-
 /**
  * <p>CassandraOutputOperator class.</p>
  *
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/fs/FSByteOutputOperator.java b/benchmark/src/main/java/org/apache/apex/benchmark/fs/FSByteOutputOperator.java
index ce0821c..56dd731 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/fs/FSByteOutputOperator.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/fs/FSByteOutputOperator.java
@@ -16,13 +16,13 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.fs;
+package org.apache.apex.benchmark.fs;
 
 import java.util.Arrays;
 
 import javax.validation.constraints.Min;
 
-import com.datatorrent.lib.io.fs.AbstractFileOutputOperator;
+import org.apache.apex.malhar.lib.io.fs.AbstractFileOutputOperator;
 
 /**
  * This output operator receives
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/fs/FSOutputOperatorBenchmark.java b/benchmark/src/main/java/org/apache/apex/benchmark/fs/FSOutputOperatorBenchmark.java
index 7a63d18..baddbc1 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/fs/FSOutputOperatorBenchmark.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/fs/FSOutputOperatorBenchmark.java
@@ -16,8 +16,10 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.fs;
+package org.apache.apex.benchmark.fs;
 
+import org.apache.apex.malhar.lib.counters.BasicCounters;
+import org.apache.apex.malhar.lib.testbench.RandomWordGenerator;
 import org.apache.commons.lang.mutable.MutableLong;
 import org.apache.hadoop.conf.Configuration;
 
@@ -29,10 +31,6 @@
 
 import com.datatorrent.api.annotation.ApplicationAnnotation;
 
-import com.datatorrent.lib.counters.BasicCounters;
-
-import com.datatorrent.lib.testbench.RandomWordGenerator;
-
 /**
  * Application used to benchmark HDFS output operator
  * The DAG consists of random word generator operator that is
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/hive/HiveInsertBenchmarkingApp.java b/benchmark/src/main/java/org/apache/apex/benchmark/hive/HiveInsertBenchmarkingApp.java
index 95fa961..2ecb2eb 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/hive/HiveInsertBenchmarkingApp.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/hive/HiveInsertBenchmarkingApp.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.hive;
+package org.apache.apex.benchmark.hive;
 
 import java.sql.SQLException;
 import java.sql.Statement;
@@ -28,6 +28,9 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.hive.AbstractFSRollingOutputOperator;
+import org.apache.apex.malhar.hive.HiveOperator;
+import org.apache.apex.malhar.hive.HiveStore;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.Context.OperatorContext;
@@ -39,9 +42,6 @@
 import com.datatorrent.api.StreamingApplication;
 
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.contrib.hive.AbstractFSRollingOutputOperator;
-import com.datatorrent.contrib.hive.HiveOperator;
-import com.datatorrent.contrib.hive.HiveStore;
 
 /**
  * Application used to benchmark HIVE Insert operator
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/hive/HiveMapInsertBenchmarkingApp.java b/benchmark/src/main/java/org/apache/apex/benchmark/hive/HiveMapInsertBenchmarkingApp.java
index 98d9ce3..800fa5a 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/hive/HiveMapInsertBenchmarkingApp.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/hive/HiveMapInsertBenchmarkingApp.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.hive;
+package org.apache.apex.benchmark.hive;
 
 import java.sql.SQLException;
 import java.sql.Statement;
@@ -27,18 +27,17 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.benchmark.RandomMapOutput;
+import org.apache.apex.malhar.hive.AbstractFSRollingOutputOperator;
+import org.apache.apex.malhar.hive.HiveOperator;
+import org.apache.apex.malhar.hive.HiveStore;
+import org.apache.apex.malhar.lib.testbench.RandomEventGenerator;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.Context.PortContext;
 import com.datatorrent.api.DAG;
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.benchmark.RandomMapOutput;
-
-import com.datatorrent.contrib.hive.AbstractFSRollingOutputOperator;
-import com.datatorrent.contrib.hive.HiveOperator;
-import com.datatorrent.contrib.hive.HiveStore;
-import com.datatorrent.lib.testbench.RandomEventGenerator;
 
 
 /**
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/kafka/BenchmarkKafkaInputOperator.java b/benchmark/src/main/java/org/apache/apex/benchmark/kafka/BenchmarkKafkaInputOperator.java
index e147ad7..0bde5ed 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/kafka/BenchmarkKafkaInputOperator.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/kafka/BenchmarkKafkaInputOperator.java
@@ -16,10 +16,11 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.kafka;
+package org.apache.apex.benchmark.kafka;
+
+import org.apache.apex.malhar.contrib.kafka.AbstractKafkaInputOperator;
 
 import com.datatorrent.api.DefaultOutputPort;
-import com.datatorrent.contrib.kafka.AbstractKafkaInputOperator;
 
 import kafka.message.Message;
 
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/kafka/BenchmarkPartitionableKafkaOutputOperator.java b/benchmark/src/main/java/org/apache/apex/benchmark/kafka/BenchmarkPartitionableKafkaOutputOperator.java
index 6353c37..37b4d0a 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/kafka/BenchmarkPartitionableKafkaOutputOperator.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/kafka/BenchmarkPartitionableKafkaOutputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.kafka;
+package org.apache.apex.benchmark.kafka;
 
 import java.util.ArrayList;
 import java.util.Collection;
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/kafka/KafkaInputBenchmark.java b/benchmark/src/main/java/org/apache/apex/benchmark/kafka/KafkaInputBenchmark.java
index ead6c66..e51da05 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/kafka/KafkaInputBenchmark.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/kafka/KafkaInputBenchmark.java
@@ -16,11 +16,13 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.kafka;
+package org.apache.apex.benchmark.kafka;
 
 import java.util.Properties;
 
-
+import org.apache.apex.malhar.contrib.kafka.HighlevelKafkaConsumer;
+import org.apache.apex.malhar.contrib.kafka.KafkaConsumer;
+import org.apache.apex.malhar.contrib.kafka.SimpleKafkaConsumer;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.Context.OperatorContext;
@@ -35,9 +37,6 @@
 import com.datatorrent.api.annotation.ApplicationAnnotation;
 
 import com.datatorrent.common.util.BaseOperator;
-import com.datatorrent.contrib.kafka.HighlevelKafkaConsumer;
-import com.datatorrent.contrib.kafka.KafkaConsumer;
-import com.datatorrent.contrib.kafka.SimpleKafkaConsumer;
 
 /**
 * The stream app to test the benchmark of kafka
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/kafka/KafkaOutputBenchmark.java b/benchmark/src/main/java/org/apache/apex/benchmark/kafka/KafkaOutputBenchmark.java
index 0dd4352..e8578a6 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/kafka/KafkaOutputBenchmark.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/kafka/KafkaOutputBenchmark.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.kafka;
+package org.apache.apex.benchmark.kafka;
 
 import org.apache.hadoop.conf.Configuration;
 
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/kafka/KafkaTestPartitioner.java b/benchmark/src/main/java/org/apache/apex/benchmark/kafka/KafkaTestPartitioner.java
index 65601d5..64ef2c7 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/kafka/KafkaTestPartitioner.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/kafka/KafkaTestPartitioner.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.kafka;
+package org.apache.apex.benchmark.kafka;
 
 import kafka.producer.Partitioner;
 import kafka.utils.VerifiableProperties;
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/package-info.java b/benchmark/src/main/java/org/apache/apex/benchmark/package-info.java
index 5ee1888..9f99b8f 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/package-info.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/package-info.java
@@ -19,4 +19,4 @@
 /**
 * Apex Malhar performance demonstration application.
  */
-package com.datatorrent.benchmark;
+package org.apache.apex.benchmark;
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/script/RubyOperatorBenchmarkApplication.java b/benchmark/src/main/java/org/apache/apex/benchmark/script/RubyOperatorBenchmarkApplication.java
index b86cd01..065f546 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/script/RubyOperatorBenchmarkApplication.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/script/RubyOperatorBenchmarkApplication.java
@@ -16,8 +16,12 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.script;
+package org.apache.apex.benchmark.script;
 
+import org.apache.apex.benchmark.RandomMapOutput;
+import org.apache.apex.malhar.contrib.ruby.RubyOperator;
+import org.apache.apex.malhar.lib.io.ConsoleOutputOperator;
+import org.apache.apex.malhar.lib.testbench.RandomEventGenerator;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.Context.PortContext;
@@ -27,12 +31,6 @@
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
 
-import com.datatorrent.benchmark.RandomMapOutput;
-import com.datatorrent.contrib.ruby.RubyOperator;
-
-import com.datatorrent.lib.io.ConsoleOutputOperator;
-import com.datatorrent.lib.testbench.RandomEventGenerator;
-
 /**
  *
  * Application to benchmark the performance of ruby operator.
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/spillable/SpillableBenchmarkApp.java b/benchmark/src/main/java/org/apache/apex/benchmark/spillable/SpillableBenchmarkApp.java
index 300ea64..4419a11 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/spillable/SpillableBenchmarkApp.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/spillable/SpillableBenchmarkApp.java
@@ -16,8 +16,9 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.spillable;
+package org.apache.apex.benchmark.spillable;
 
+import org.apache.apex.malhar.lib.fileaccess.TFileImpl;
 import org.apache.apex.malhar.lib.state.spillable.managed.ManagedStateSpillableStateStore;
 import org.apache.hadoop.conf.Configuration;
 
@@ -26,7 +27,6 @@
 import com.datatorrent.api.DAG;
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.lib.fileaccess.TFileImpl;
 
 @ApplicationAnnotation(name = "SpillableBenchmarkApp")
 /**
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/spillable/SpillableTestInputOperator.java b/benchmark/src/main/java/org/apache/apex/benchmark/spillable/SpillableTestInputOperator.java
index 9bee9a6..0285f04 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/spillable/SpillableTestInputOperator.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/spillable/SpillableTestInputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.spillable;
+package org.apache.apex.benchmark.spillable;
 
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.InputOperator;
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/spillable/SpillableTestOperator.java b/benchmark/src/main/java/org/apache/apex/benchmark/spillable/SpillableTestOperator.java
index 7c45106..c827acb 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/spillable/SpillableTestOperator.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/spillable/SpillableTestOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.spillable;
+package org.apache.apex.benchmark.spillable;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/state/ManagedStateBenchmarkApp.java b/benchmark/src/main/java/org/apache/apex/benchmark/state/ManagedStateBenchmarkApp.java
index 2dc6f0d..5ba8a33 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/state/ManagedStateBenchmarkApp.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/state/ManagedStateBenchmarkApp.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.state;
+package org.apache.apex.benchmark.state;
 
 import java.io.IOException;
 import java.io.Serializable;
@@ -27,7 +27,9 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.lib.fileaccess.TFileImpl;
 import org.apache.apex.malhar.lib.state.managed.ManagedTimeUnifiedStateImpl;
+import org.apache.apex.malhar.lib.util.KeyValPair;
 import org.apache.hadoop.conf.Configuration;
 
 import com.google.common.collect.Lists;
@@ -43,8 +45,6 @@
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
 import com.datatorrent.common.util.BaseOperator;
-import com.datatorrent.lib.fileaccess.TFileImpl;
-import com.datatorrent.lib.util.KeyValPair;
 
 @ApplicationAnnotation(name = "ManagedStateBenchmark")
 /**
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/state/StoreOperator.java b/benchmark/src/main/java/org/apache/apex/benchmark/state/StoreOperator.java
index 60a775c..eea4ac5 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/state/StoreOperator.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/state/StoreOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.state;
+package org.apache.apex.benchmark.state;
 
 import java.nio.ByteBuffer;
 import java.util.LinkedList;
@@ -28,6 +28,7 @@
 import org.slf4j.LoggerFactory;
 
 import org.apache.apex.malhar.lib.state.managed.ManagedTimeUnifiedStateImpl;
+import org.apache.apex.malhar.lib.util.KeyValPair;
 
 import com.google.common.collect.Maps;
 
@@ -35,7 +36,6 @@
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.Operator;
 import com.datatorrent.common.util.BaseOperator;
-import com.datatorrent.lib.util.KeyValPair;
 import com.datatorrent.netlet.util.Slice;
 
 /**
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/stream/DevNullCounterBenchmark.java b/benchmark/src/main/java/org/apache/apex/benchmark/stream/DevNullCounterBenchmark.java
index b0b7314..ef853e1 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/stream/DevNullCounterBenchmark.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/stream/DevNullCounterBenchmark.java
@@ -16,8 +16,9 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.stream;
+package org.apache.apex.benchmark.stream;
 
+import org.apache.apex.malhar.lib.stream.DevNullCounter;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.Context.PortContext;
@@ -25,11 +26,10 @@
 import com.datatorrent.api.DAG.Locality;
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.lib.stream.DevNullCounter;
 
 /**
  *
- * Functional tests for {@link com.datatorrent.lib.testbench.DevNullCounter}.
+ * Functional tests for {@link org.apache.apex.malhar.lib.stream.DevNullCounter}.
  * <p>
  * <br>
  * oper.process is called a billion times<br>
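Per the import in this hunk, `DevNullCounter` lives in `org.apache.apex.malhar.lib.stream`. A tiny usage sketch under the new package (hypothetical app; the `integer_data` and `data` port names are assumptions based on the Malhar library, not quoted from this diff):

```java
import org.apache.apex.malhar.lib.stream.DevNullCounter;
import org.apache.apex.malhar.lib.testbench.RandomEventGenerator;
import org.apache.hadoop.conf.Configuration;

import com.datatorrent.api.DAG;
import com.datatorrent.api.StreamingApplication;

public class DevNullCounterSketch implements StreamingApplication
{
  @Override
  public void populateDAG(DAG dag, Configuration conf)
  {
    // Feed randomly generated integers into the counting null sink.
    RandomEventGenerator rand = dag.addOperator("rand", new RandomEventGenerator());
    DevNullCounter<Integer> counter = dag.addOperator("counter", new DevNullCounter<Integer>());
    dag.addStream("data", rand.integer_data, counter.data);
  }
}
```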
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/stream/IntegerOperator.java b/benchmark/src/main/java/org/apache/apex/benchmark/stream/IntegerOperator.java
index c716206..1fca7e5 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/stream/IntegerOperator.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/stream/IntegerOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.stream;
+package org.apache.apex.benchmark.stream;
 
 import com.datatorrent.api.Context.OperatorContext;
 import com.datatorrent.api.DefaultOutputPort;
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/stream/StreamDuplicaterApp.java b/benchmark/src/main/java/org/apache/apex/benchmark/stream/StreamDuplicaterApp.java
index 2e5bcf9..37936e6 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/stream/StreamDuplicaterApp.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/stream/StreamDuplicaterApp.java
@@ -16,8 +16,10 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.stream;
+package org.apache.apex.benchmark.stream;
 
+import org.apache.apex.malhar.lib.stream.DevNull;
+import org.apache.apex.malhar.lib.stream.StreamDuplicater;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.Context.PortContext;
@@ -25,8 +27,6 @@
 import com.datatorrent.api.DAG.Locality;
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.lib.stream.DevNull;
-import com.datatorrent.lib.stream.StreamDuplicater;
 
 /**
  * Benchmark App for StreamDuplicater Operator.
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/stream/StreamMergeApp.java b/benchmark/src/main/java/org/apache/apex/benchmark/stream/StreamMergeApp.java
index bb1d081..de9507a 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/stream/StreamMergeApp.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/stream/StreamMergeApp.java
@@ -16,8 +16,10 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.stream;
+package org.apache.apex.benchmark.stream;
 
+import org.apache.apex.benchmark.WordCountOperator;
+import org.apache.apex.malhar.lib.stream.StreamMerger;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.Context.PortContext;
@@ -25,8 +27,6 @@
 import com.datatorrent.api.DAG.Locality;
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.benchmark.WordCountOperator;
-import com.datatorrent.lib.stream.StreamMerger;
 
 /**
  * Benchmark App for StreamMerge Operator.
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/testbench/EventClassifierApp.java b/benchmark/src/main/java/org/apache/apex/benchmark/testbench/EventClassifierApp.java
index b1ddbee..d12267e 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/testbench/EventClassifierApp.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/testbench/EventClassifierApp.java
@@ -16,11 +16,13 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.testbench;
+package org.apache.apex.benchmark.testbench;
 
 import java.util.ArrayList;
 import java.util.HashMap;
 
+import org.apache.apex.malhar.lib.stream.DevNull;
+import org.apache.apex.malhar.lib.testbench.EventClassifier;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.Context.PortContext;
@@ -28,8 +30,6 @@
 import com.datatorrent.api.DAG.Locality;
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.lib.stream.DevNull;
-import com.datatorrent.lib.testbench.EventClassifier;
 
 /**
  * Benchmark App for EventClassifier Operator.
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/testbench/EventClassifierNumberToHashDoubleApp.java b/benchmark/src/main/java/org/apache/apex/benchmark/testbench/EventClassifierNumberToHashDoubleApp.java
index 5fe478b..c1405fc 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/testbench/EventClassifierNumberToHashDoubleApp.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/testbench/EventClassifierNumberToHashDoubleApp.java
@@ -16,10 +16,13 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.testbench;
+package org.apache.apex.benchmark.testbench;
 
 import java.util.HashMap;
 
+import org.apache.apex.benchmark.WordCountOperator;
+import org.apache.apex.benchmark.stream.IntegerOperator;
+import org.apache.apex.malhar.lib.testbench.EventClassifierNumberToHashDouble;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.Context.PortContext;
@@ -27,9 +30,6 @@
 import com.datatorrent.api.DAG.Locality;
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.benchmark.WordCountOperator;
-import com.datatorrent.benchmark.stream.IntegerOperator;
-import com.datatorrent.lib.testbench.EventClassifierNumberToHashDouble;
 
 /**
  * Benchmark App for EventClassifierNumberToHashDouble Operator.
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/testbench/EventGeneratorApp.java b/benchmark/src/main/java/org/apache/apex/benchmark/testbench/EventGeneratorApp.java
index 8f28ae6..0c997dc 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/testbench/EventGeneratorApp.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/testbench/EventGeneratorApp.java
@@ -16,10 +16,12 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.testbench;
+package org.apache.apex.benchmark.testbench;
 
 import java.util.HashMap;
 
+import org.apache.apex.malhar.lib.stream.DevNull;
+import org.apache.apex.malhar.lib.testbench.EventGenerator;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.Context.PortContext;
@@ -27,8 +29,6 @@
 import com.datatorrent.api.DAG.Locality;
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.lib.stream.DevNull;
-import com.datatorrent.lib.testbench.EventGenerator;
 
 /**
  * Benchmark App for EventGenerator Operator.
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/testbench/EventIncrementerApp.java b/benchmark/src/main/java/org/apache/apex/benchmark/testbench/EventIncrementerApp.java
index e562224..d7aa2dc 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/testbench/EventIncrementerApp.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/testbench/EventIncrementerApp.java
@@ -16,19 +16,19 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.testbench;
+package org.apache.apex.benchmark.testbench;
 
 import java.util.ArrayList;
 import java.util.HashMap;
 
+import org.apache.apex.malhar.lib.stream.DevNull;
+import org.apache.apex.malhar.lib.testbench.EventIncrementer;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.DAG;
 import com.datatorrent.api.DAG.Locality;
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.lib.stream.DevNull;
-import com.datatorrent.lib.testbench.EventIncrementer;
 
 /**
  * Benchmark App for EventIncrementer Operator.
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/testbench/FilterClassifierApp.java b/benchmark/src/main/java/org/apache/apex/benchmark/testbench/FilterClassifierApp.java
index ea2943f..57664ac 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/testbench/FilterClassifierApp.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/testbench/FilterClassifierApp.java
@@ -16,19 +16,19 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.testbench;
+package org.apache.apex.benchmark.testbench;
 
 import java.util.ArrayList;
 import java.util.HashMap;
 
+import org.apache.apex.malhar.lib.stream.DevNull;
+import org.apache.apex.malhar.lib.testbench.FilterClassifier;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.DAG;
 import com.datatorrent.api.DAG.Locality;
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.lib.stream.DevNull;
-import com.datatorrent.lib.testbench.FilterClassifier;
 
 /**
  * Benchmark App for FilterClassifier Operator.
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/testbench/FilteredEventClassifierApp.java b/benchmark/src/main/java/org/apache/apex/benchmark/testbench/FilteredEventClassifierApp.java
index 52c0bed..f282838 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/testbench/FilteredEventClassifierApp.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/testbench/FilteredEventClassifierApp.java
@@ -16,19 +16,19 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.testbench;
+package org.apache.apex.benchmark.testbench;
 
 import java.util.ArrayList;
 import java.util.HashMap;
 
+import org.apache.apex.malhar.lib.stream.DevNull;
+import org.apache.apex.malhar.lib.testbench.FilteredEventClassifier;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.DAG;
 import com.datatorrent.api.DAG.Locality;
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.lib.stream.DevNull;
-import com.datatorrent.lib.testbench.FilteredEventClassifier;
 
 /**
  * Benchmark App for FilteredEventClassifier Operator.
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/testbench/HashMapOperator.java b/benchmark/src/main/java/org/apache/apex/benchmark/testbench/HashMapOperator.java
index 29cd079..66fab36 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/testbench/HashMapOperator.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/testbench/HashMapOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.testbench;
+package org.apache.apex.benchmark.testbench;
 
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -24,10 +24,11 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.lib.testbench.EventGenerator;
+
 import com.datatorrent.api.Context.OperatorContext;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.InputOperator;
-import com.datatorrent.lib.testbench.EventGenerator;
 
 /**
  * HashMap Input Operator used as a helper in testbench benchmarking apps.
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/testbench/RandomEventGeneratorApp.java b/benchmark/src/main/java/org/apache/apex/benchmark/testbench/RandomEventGeneratorApp.java
index df5b11e..bdca047 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/testbench/RandomEventGeneratorApp.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/testbench/RandomEventGeneratorApp.java
@@ -16,16 +16,16 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.testbench;
+package org.apache.apex.benchmark.testbench;
 
+import org.apache.apex.malhar.lib.stream.DevNull;
+import org.apache.apex.malhar.lib.testbench.RandomEventGenerator;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.DAG;
 import com.datatorrent.api.DAG.Locality;
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.lib.stream.DevNull;
-import com.datatorrent.lib.testbench.RandomEventGenerator;
 
 /**
  * Benchmark App for RandomEventGenerator Operator.
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/testbench/SeedEventGeneratorApp.java b/benchmark/src/main/java/org/apache/apex/benchmark/testbench/SeedEventGeneratorApp.java
index faafcbf..0b7f7b5 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/testbench/SeedEventGeneratorApp.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/testbench/SeedEventGeneratorApp.java
@@ -16,10 +16,14 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.testbench;
+package org.apache.apex.benchmark.testbench;
 
 import java.util.ArrayList;
 import java.util.HashMap;
+
+import org.apache.apex.malhar.lib.stream.DevNull;
+import org.apache.apex.malhar.lib.testbench.SeedEventGenerator;
+import org.apache.apex.malhar.lib.util.KeyValPair;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.Context.PortContext;
@@ -27,9 +31,6 @@
 import com.datatorrent.api.DAG.Locality;
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.lib.stream.DevNull;
-import com.datatorrent.lib.testbench.SeedEventGenerator;
-import com.datatorrent.lib.util.KeyValPair;
 
 
 /**
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/testbench/ThroughputCounterApp.java b/benchmark/src/main/java/org/apache/apex/benchmark/testbench/ThroughputCounterApp.java
index d6e762e..4ad35f9 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/testbench/ThroughputCounterApp.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/testbench/ThroughputCounterApp.java
@@ -16,18 +16,18 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.testbench;
+package org.apache.apex.benchmark.testbench;
 
 import java.util.HashMap;
 
+import org.apache.apex.malhar.lib.stream.DevNull;
+import org.apache.apex.malhar.lib.testbench.ThroughputCounter;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.DAG;
 import com.datatorrent.api.DAG.Locality;
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.lib.stream.DevNull;
-import com.datatorrent.lib.testbench.ThroughputCounter;
 
 /**
  * Benchmark App for ThroughputCounter Operator.
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/window/AbstractGenerator.java b/benchmark/src/main/java/org/apache/apex/benchmark/window/AbstractGenerator.java
index bfdc0ec..a12354e 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/window/AbstractGenerator.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/window/AbstractGenerator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.window;
+package org.apache.apex.benchmark.window;
 
 import java.nio.ByteBuffer;
 import java.util.Random;
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/window/AbstractWindowedOperatorBenchmarkApp.java b/benchmark/src/main/java/org/apache/apex/benchmark/window/AbstractWindowedOperatorBenchmarkApp.java
index 7a345fe..dc0ad48 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/window/AbstractWindowedOperatorBenchmarkApp.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/window/AbstractWindowedOperatorBenchmarkApp.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.window;
+package org.apache.apex.benchmark.window;
 
 import java.io.IOException;
 import java.io.Serializable;
@@ -25,10 +25,13 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.benchmark.window.WindowedOperatorBenchmarkApp.WindowedGenerator;
+import org.apache.apex.malhar.lib.fileaccess.TFileImpl;
 import org.apache.apex.malhar.lib.state.managed.UnboundedTimeBucketAssigner;
 import org.apache.apex.malhar.lib.state.spillable.SpillableComplexComponentImpl;
 import org.apache.apex.malhar.lib.state.spillable.SpillableStateStore;
 import org.apache.apex.malhar.lib.state.spillable.managed.ManagedTimeUnifiedStateSpillableStateStore;
+import org.apache.apex.malhar.lib.stream.DevNull;
 import org.apache.apex.malhar.lib.window.Accumulation;
 import org.apache.apex.malhar.lib.window.TriggerOption;
 import org.apache.apex.malhar.lib.window.WindowOption;
@@ -47,9 +50,6 @@
 import com.datatorrent.api.Stats.OperatorStats;
 import com.datatorrent.api.StatsListener;
 import com.datatorrent.api.StreamingApplication;
-import com.datatorrent.benchmark.window.WindowedOperatorBenchmarkApp.WindowedGenerator;
-import com.datatorrent.lib.fileaccess.TFileImpl;
-import com.datatorrent.lib.stream.DevNull;
 
 /**
  * @since 3.7.0
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/window/KeyedWindowedOperatorBenchmarkApp.java b/benchmark/src/main/java/org/apache/apex/benchmark/window/KeyedWindowedOperatorBenchmarkApp.java
index 7e38584..b8dce7f 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/window/KeyedWindowedOperatorBenchmarkApp.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/window/KeyedWindowedOperatorBenchmarkApp.java
@@ -16,14 +16,16 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.window;
+package org.apache.apex.benchmark.window;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.lib.fileaccess.TFileImpl;
 import org.apache.apex.malhar.lib.state.spillable.SpillableComplexComponentImpl;
 import org.apache.apex.malhar.lib.state.spillable.SpillableSetMultimapImpl;
 import org.apache.apex.malhar.lib.state.spillable.managed.ManagedStateSpillableStateStore;
+import org.apache.apex.malhar.lib.util.KeyValPair;
 import org.apache.apex.malhar.lib.utils.serde.GenericSerde;
 import org.apache.apex.malhar.lib.window.Accumulation;
 import org.apache.apex.malhar.lib.window.Tuple;
@@ -39,8 +41,6 @@
 import com.datatorrent.api.DAG;
 import com.datatorrent.api.DAG.Locality;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.lib.fileaccess.TFileImpl;
-import com.datatorrent.lib.util.KeyValPair;
 
 /**
  * @since 3.7.0
diff --git a/benchmark/src/main/java/org/apache/apex/benchmark/window/WindowedOperatorBenchmarkApp.java b/benchmark/src/main/java/org/apache/apex/benchmark/window/WindowedOperatorBenchmarkApp.java
index 7e402fc..15a577e 100644
--- a/benchmark/src/main/java/org/apache/apex/benchmark/window/WindowedOperatorBenchmarkApp.java
+++ b/benchmark/src/main/java/org/apache/apex/benchmark/window/WindowedOperatorBenchmarkApp.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.window;
+package org.apache.apex.benchmark.window;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/ApplicationFixedTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/ApplicationFixedTest.java
index cd8a3ec..644cd01 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/ApplicationFixedTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/ApplicationFixedTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark;
+package org.apache.apex.benchmark;
 
 import java.io.IOException;
 
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/BenchmarkTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/BenchmarkTest.java
index 0a21a7c..c5249a2 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/BenchmarkTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/BenchmarkTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark;
+package org.apache.apex.benchmark;
 
 import java.io.IOException;
 
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/CouchBaseBenchmarkTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/CouchBaseBenchmarkTest.java
index 6a1c968..aef17b3 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/CouchBaseBenchmarkTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/CouchBaseBenchmarkTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark;
+package org.apache.apex.benchmark;
 
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/accumulo/AccumuloApp.java b/benchmark/src/test/java/org/apache/apex/benchmark/accumulo/AccumuloApp.java
index e2936fe..3417b58 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/accumulo/AccumuloApp.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/accumulo/AccumuloApp.java
@@ -16,17 +16,17 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.accumulo;
+package org.apache.apex.benchmark.accumulo;
 
 import org.apache.accumulo.core.data.Mutation;
+import org.apache.apex.malhar.contrib.accumulo.AbstractAccumuloOutputOperator;
+import org.apache.apex.malhar.contrib.accumulo.AccumuloRowTupleGenerator;
+import org.apache.apex.malhar.contrib.accumulo.AccumuloTestHelper;
+import org.apache.apex.malhar.contrib.accumulo.AccumuloTuple;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.DAG;
 import com.datatorrent.api.StreamingApplication;
-import com.datatorrent.contrib.accumulo.AbstractAccumuloOutputOperator;
-import com.datatorrent.contrib.accumulo.AccumuloRowTupleGenerator;
-import com.datatorrent.contrib.accumulo.AccumuloTestHelper;
-import com.datatorrent.contrib.accumulo.AccumuloTuple;
 
 /**
  * BenchMark Results
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/accumulo/AccumuloAppTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/accumulo/AccumuloAppTest.java
index 8b47a9b..0f48d16 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/accumulo/AccumuloAppTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/accumulo/AccumuloAppTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.accumulo;
+package org.apache.apex.benchmark.accumulo;
 
 import org.junit.Test;
 
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/aerospike/AerospikeBenchmarkAppTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/aerospike/AerospikeBenchmarkAppTest.java
index 14fe441..9eea1b7 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/aerospike/AerospikeBenchmarkAppTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/aerospike/AerospikeBenchmarkAppTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.aerospike;
+package org.apache.apex.benchmark.aerospike;
 
 import org.junit.Test;
 
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/algo/UniqueValueCountBenchmarkTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/algo/UniqueValueCountBenchmarkTest.java
index 079d073..56c0cf2 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/algo/UniqueValueCountBenchmarkTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/algo/UniqueValueCountBenchmarkTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.algo;
+package org.apache.apex.benchmark.algo;
 
 import org.junit.Test;
 
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/cassandra/CassandraApplicatonTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/cassandra/CassandraApplicatonTest.java
index ec4f308..1708eaf 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/cassandra/CassandraApplicatonTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/cassandra/CassandraApplicatonTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.cassandra;
+package org.apache.apex.benchmark.cassandra;
 
 import org.junit.Test;
 
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/hbase/HBaseApplicationTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/hbase/HBaseApplicationTest.java
index 32a4907..94bc6a9 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/hbase/HBaseApplicationTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/hbase/HBaseApplicationTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.hbase;
+package org.apache.apex.benchmark.hbase;
 
 import org.junit.Test;
 
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/hbase/HBaseCsvMappingApplication.java b/benchmark/src/test/java/org/apache/apex/benchmark/hbase/HBaseCsvMappingApplication.java
index b61f1d3..b6ec667 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/hbase/HBaseCsvMappingApplication.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/hbase/HBaseCsvMappingApplication.java
@@ -16,16 +16,16 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.hbase;
+package org.apache.apex.benchmark.hbase;
 
+import org.apache.apex.malhar.contrib.hbase.HBaseCsvMappingPutOperator;
+import org.apache.apex.malhar.contrib.hbase.HBaseRowStringGenerator;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.DAG;
 import com.datatorrent.api.DAG.Locality;
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.contrib.hbase.HBaseCsvMappingPutOperator;
-import com.datatorrent.contrib.hbase.HBaseRowStringGenerator;
 
 /**
  * BenchMark Results
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/hive/HiveInsertBenchmarkTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/hive/HiveInsertBenchmarkTest.java
index 653c6f6..aa50e45 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/hive/HiveInsertBenchmarkTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/hive/HiveInsertBenchmarkTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.hive;
+package org.apache.apex.benchmark.hive;
 
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/hive/HiveMapBenchmarkTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/hive/HiveMapBenchmarkTest.java
index e0097c6..6bd2edf 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/hive/HiveMapBenchmarkTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/hive/HiveMapBenchmarkTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.hive;
+package org.apache.apex.benchmark.hive;
 
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/kafka/KafkaInputBenchmarkTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/kafka/KafkaInputBenchmarkTest.java
index 6cb901a..8f2c42c 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/kafka/KafkaInputBenchmarkTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/kafka/KafkaInputBenchmarkTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.kafka;
+package org.apache.apex.benchmark.kafka;
 
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/kafka/KafkaOutputBenchmarkTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/kafka/KafkaOutputBenchmarkTest.java
index 4de7193..1823a00 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/kafka/KafkaOutputBenchmarkTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/kafka/KafkaOutputBenchmarkTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.kafka;
+package org.apache.apex.benchmark.kafka;
 
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/memsql/MemsqlInputBenchmark.java b/benchmark/src/test/java/org/apache/apex/benchmark/memsql/MemsqlInputBenchmark.java
index 9201cd5..413438a 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/memsql/MemsqlInputBenchmark.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/memsql/MemsqlInputBenchmark.java
@@ -16,18 +16,18 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.memsql;
+package org.apache.apex.benchmark.memsql;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.contrib.memsql.MemsqlInputOperator;
+import org.apache.apex.malhar.lib.stream.DevNull;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.DAG;
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.contrib.memsql.MemsqlInputOperator;
-import com.datatorrent.lib.stream.DevNull;
 
 /**
  * BenchMark Results
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/memsql/MemsqlInputBenchmarkTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/memsql/MemsqlInputBenchmarkTest.java
index a596903..f3127a5 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/memsql/MemsqlInputBenchmarkTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/memsql/MemsqlInputBenchmarkTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.memsql;
+package org.apache.apex.benchmark.memsql;
 
 import java.io.FileInputStream;
 import java.io.IOException;
@@ -28,6 +28,9 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.contrib.memsql.AbstractMemsqlOutputOperatorTest;
+import org.apache.apex.malhar.contrib.memsql.MemsqlPOJOOutputOperator;
+import org.apache.apex.malhar.contrib.memsql.MemsqlStore;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 
@@ -35,15 +38,12 @@
 import com.datatorrent.api.DAG;
 import com.datatorrent.api.LocalMode;
 import com.datatorrent.api.Operator.ProcessingMode;
-import com.datatorrent.contrib.memsql.AbstractMemsqlOutputOperatorTest;
-import com.datatorrent.contrib.memsql.MemsqlPOJOOutputOperator;
-import com.datatorrent.contrib.memsql.MemsqlStore;
 import com.datatorrent.netlet.util.DTThrowable;
 
-import static com.datatorrent.contrib.memsql.AbstractMemsqlOutputOperatorTest.BATCH_SIZE;
-import static com.datatorrent.lib.db.jdbc.JdbcNonTransactionalOutputOperatorTest.APP_ID;
-import static com.datatorrent.lib.db.jdbc.JdbcNonTransactionalOutputOperatorTest.OPERATOR_ID;
-import static com.datatorrent.lib.helper.OperatorContextTestHelper.mockOperatorContext;
+import static org.apache.apex.malhar.contrib.memsql.AbstractMemsqlOutputOperatorTest.BATCH_SIZE;
+import static org.apache.apex.malhar.lib.db.jdbc.JdbcNonTransactionalOutputOperatorTest.APP_ID;
+import static org.apache.apex.malhar.lib.db.jdbc.JdbcNonTransactionalOutputOperatorTest.OPERATOR_ID;
+import static org.apache.apex.malhar.lib.helper.OperatorContextTestHelper.mockOperatorContext;
 
 public class MemsqlInputBenchmarkTest
 {
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/memsql/MemsqlOutputBenchmark.java b/benchmark/src/test/java/org/apache/apex/benchmark/memsql/MemsqlOutputBenchmark.java
index 297bc6d..0b4f126 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/memsql/MemsqlOutputBenchmark.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/memsql/MemsqlOutputBenchmark.java
@@ -16,19 +16,19 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.memsql;
+package org.apache.apex.benchmark.memsql;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.contrib.memsql.MemsqlPOJOOutputOperator;
+import org.apache.apex.malhar.lib.testbench.RandomEventGenerator;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.DAG;
 import com.datatorrent.api.DAG.Locality;
 import com.datatorrent.api.StreamingApplication;
 import com.datatorrent.api.annotation.ApplicationAnnotation;
-import com.datatorrent.contrib.memsql.MemsqlPOJOOutputOperator;
-import com.datatorrent.lib.testbench.RandomEventGenerator;
 
 /**
  * BenchMark Results
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/memsql/MemsqlOutputBenchmarkTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/memsql/MemsqlOutputBenchmarkTest.java
index bf82ab3..3d4766e 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/memsql/MemsqlOutputBenchmarkTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/memsql/MemsqlOutputBenchmarkTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.memsql;
+package org.apache.apex.benchmark.memsql;
 
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
@@ -27,12 +27,12 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.contrib.memsql.AbstractMemsqlOutputOperatorTest;
+import org.apache.apex.malhar.contrib.memsql.MemsqlStore;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.LocalMode;
-import com.datatorrent.contrib.memsql.AbstractMemsqlOutputOperatorTest;
-import com.datatorrent.contrib.memsql.MemsqlStore;
 import com.datatorrent.netlet.util.DTThrowable;
 
 public class MemsqlOutputBenchmarkTest
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/script/RubyOperatorBenchmarkAppTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/script/RubyOperatorBenchmarkAppTest.java
index d270e7f..904ea6b 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/script/RubyOperatorBenchmarkAppTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/script/RubyOperatorBenchmarkAppTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.script;
+package org.apache.apex.benchmark.script;
 
 import org.junit.Test;
 
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/spillable/SpillableBenchmarkAppTester.java b/benchmark/src/test/java/org/apache/apex/benchmark/spillable/SpillableBenchmarkAppTester.java
index cd2c640..6490aa9 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/spillable/SpillableBenchmarkAppTester.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/spillable/SpillableBenchmarkAppTester.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.spillable;
+package org.apache.apex.benchmark.spillable;
 
 import java.io.File;
 
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/spillable/SpillableDSBenchmarkTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/spillable/SpillableDSBenchmarkTest.java
index b87fec1..f6ecf0c 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/spillable/SpillableDSBenchmarkTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/spillable/SpillableDSBenchmarkTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.spillable;
+package org.apache.apex.benchmark.spillable;
 
 import java.util.Random;
 
@@ -27,14 +27,13 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.lib.fileaccess.TFileImpl;
 import org.apache.apex.malhar.lib.state.spillable.SpillableMapImpl;
 import org.apache.apex.malhar.lib.state.spillable.SpillableTestUtils;
 import org.apache.apex.malhar.lib.state.spillable.managed.ManagedStateSpillableStateStore;
 import org.apache.apex.malhar.lib.utils.serde.Serde;
 import org.apache.apex.malhar.lib.utils.serde.StringSerde;
 
-import com.datatorrent.lib.fileaccess.TFileImpl;
-
 public class SpillableDSBenchmarkTest
 {
   private static final Logger logger = LoggerFactory.getLogger(SpillableDSBenchmarkTest.class);
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/state/ManagedStateBenchmarkAppTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/state/ManagedStateBenchmarkAppTest.java
index dc8f4b4..eb3cc7a 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/state/ManagedStateBenchmarkAppTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/state/ManagedStateBenchmarkAppTest.java
@@ -16,20 +16,20 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.state;
+package org.apache.apex.benchmark.state;
 
 import java.io.File;
 
 import org.junit.Before;
 import org.junit.Test;
 
+import org.apache.apex.benchmark.state.StoreOperator.ExecMode;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 
 import com.datatorrent.api.DAG;
 import com.datatorrent.api.LocalMode;
 import com.datatorrent.api.StreamingApplication;
-import com.datatorrent.benchmark.state.StoreOperator.ExecMode;
 
 /**
  * This is not really a unit test, but in fact a benchmark runner.
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/testbench/EventClassifierAppTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/testbench/EventClassifierAppTest.java
index 99d8a1f..88d66b4 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/testbench/EventClassifierAppTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/testbench/EventClassifierAppTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.testbench;
+package org.apache.apex.benchmark.testbench;
 
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/testbench/EventClassifierNumberToHashDoubleAppTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/testbench/EventClassifierNumberToHashDoubleAppTest.java
index 929d8bc..9fb4638 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/testbench/EventClassifierNumberToHashDoubleAppTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/testbench/EventClassifierNumberToHashDoubleAppTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.testbench;
+package org.apache.apex.benchmark.testbench;
 
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/testbench/EventGeneratorAppTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/testbench/EventGeneratorAppTest.java
index 5a427a5..0a71ea4 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/testbench/EventGeneratorAppTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/testbench/EventGeneratorAppTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.testbench;
+package org.apache.apex.benchmark.testbench;
 
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/testbench/EventIncrementerAppTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/testbench/EventIncrementerAppTest.java
index 1a85a7b..fb4401e 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/testbench/EventIncrementerAppTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/testbench/EventIncrementerAppTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.testbench;
+package org.apache.apex.benchmark.testbench;
 
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/testbench/FilterClassifierAppTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/testbench/FilterClassifierAppTest.java
index 9419022..c921a4c 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/testbench/FilterClassifierAppTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/testbench/FilterClassifierAppTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.testbench;
+package org.apache.apex.benchmark.testbench;
 
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/testbench/FilteredEventClassifierAppTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/testbench/FilteredEventClassifierAppTest.java
index 977d6b7..658e372 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/testbench/FilteredEventClassifierAppTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/testbench/FilteredEventClassifierAppTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.testbench;
+package org.apache.apex.benchmark.testbench;
 
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/testbench/ThroughputCounterAppTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/testbench/ThroughputCounterAppTest.java
index 92ca0fd..b278f6d 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/testbench/ThroughputCounterAppTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/testbench/ThroughputCounterAppTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.testbench;
+package org.apache.apex.benchmark.testbench;
 
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/util/serde/GenericSerdePerformanceTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/util/serde/GenericSerdePerformanceTest.java
index 157accc..64fca9e 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/util/serde/GenericSerdePerformanceTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/util/serde/GenericSerdePerformanceTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.util.serde;
+package org.apache.apex.benchmark.util.serde;
 
 import java.util.Random;
 
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/window/KeyedWindowedOperatorBenchmarkAppTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/window/KeyedWindowedOperatorBenchmarkAppTest.java
index 2bc9335..fe9ba0c 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/window/KeyedWindowedOperatorBenchmarkAppTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/window/KeyedWindowedOperatorBenchmarkAppTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.window;
+package org.apache.apex.benchmark.window;
 
 import java.io.File;
 
diff --git a/benchmark/src/test/java/org/apache/apex/benchmark/window/WindowedOperatorBenchmarkAppTest.java b/benchmark/src/test/java/org/apache/apex/benchmark/window/WindowedOperatorBenchmarkAppTest.java
index 4a16396..b6f3549 100644
--- a/benchmark/src/test/java/org/apache/apex/benchmark/window/WindowedOperatorBenchmarkAppTest.java
+++ b/benchmark/src/test/java/org/apache/apex/benchmark/window/WindowedOperatorBenchmarkAppTest.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.benchmark.window;
+package org.apache.apex.benchmark.window;
 
 import java.io.File;
 
diff --git a/contrib/pom.xml b/contrib/pom.xml
index a69ad29..13c77b9 100755
--- a/contrib/pom.xml
+++ b/contrib/pom.xml
@@ -25,7 +25,7 @@
   <parent>
     <groupId>org.apache.apex</groupId>
     <artifactId>malhar</artifactId>
-    <version>3.9.0-SNAPSHOT</version>
+    <version>4.0.0-SNAPSHOT</version>
   </parent>
 
   <artifactId>malhar-contrib</artifactId>
@@ -36,7 +36,6 @@
     <!-- skip tests by default as they depend on manual setup -->
     <skip.contrib.module.tests>false</skip.contrib.module.tests>
     <skipTests>true</skipTests>
-    <checkstyle.console>false</checkstyle.console>
   </properties>
   <repositories>
     <repository>
@@ -217,14 +216,6 @@
         <artifactId>japicmp-maven-plugin</artifactId>
       </plugin>
 
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-checkstyle-plugin</artifactId>
-        <configuration>
-          <maxAllowedViolations>2709</maxAllowedViolations>
-          <logViolationsToConsole>${checkstyle.console}</logViolationsToConsole>
-        </configuration>
-      </plugin>
     </plugins>
   </build>
 
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/accumulo/AbstractAccumuloInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/accumulo/AbstractAccumuloInputOperator.java
index 50a1056..8250cd0 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/accumulo/AbstractAccumuloInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/accumulo/AbstractAccumuloInputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.accumulo;
+package org.apache.apex.malhar.contrib.accumulo;
 
 import java.util.Map.Entry;
 
@@ -24,8 +24,8 @@
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
+import org.apache.apex.malhar.lib.db.AbstractStoreInputOperator;
 
-import com.datatorrent.lib.db.AbstractStoreInputOperator;
 /**
  * Base input adapter, which reads data from persistence database and writes into output port(s).&nbsp; Subclasses should provide the
  * implementation of getting the tuples and scanner methods.
@@ -38,18 +38,21 @@
  * @tags key value, accumulo
  * @since 1.0.4
  */
-public abstract class AbstractAccumuloInputOperator<T> extends AbstractStoreInputOperator<T, AccumuloStore> {
+public abstract class AbstractAccumuloInputOperator<T> extends AbstractStoreInputOperator<T, AccumuloStore>
+{
 
   public abstract T getTuple(Entry<Key, Value> entry);
 
   public abstract Scanner getScanner(Connector conn);
 
-  public AbstractAccumuloInputOperator() {
+  public AbstractAccumuloInputOperator()
+  {
     store = new AccumuloStore();
   }
 
   @Override
-  public void emitTuples() {
+  public void emitTuples()
+  {
     Connector conn = store.getConnector();
     Scanner scan = getScanner(conn);
 
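For orientation while reviewing the rename: the hunk above shows the two abstract methods a concrete input operator supplies, getTuple(Entry) and getScanner(Connector). Below is a minimal, hypothetical sketch against the moved package; the class name, table wiring, and tuple format are invented for illustration and are not part of this change. It assumes the inherited store field and AccumuloStore.getTableName() shown in this file.

package org.apache.apex.malhar.contrib.accumulo;

import java.util.Map.Entry;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;

// Hypothetical subclass: emits each Accumulo entry as a "row=value" string.
public class StringAccumuloInputOperator extends AbstractAccumuloInputOperator<String>
{
  @Override
  public String getTuple(Entry<Key, Value> entry)
  {
    // Concatenate the row id and the cell value into one tuple.
    return entry.getKey().getRow().toString() + "=" + entry.getValue().toString();
  }

  @Override
  public Scanner getScanner(Connector conn)
  {
    try {
      // Scan the table configured on the store, with empty authorizations.
      return conn.createScanner(store.getTableName(), new Authorizations());
    } catch (TableNotFoundException e) {
      throw new RuntimeException(e);
    }
  }
}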
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/accumulo/AbstractAccumuloOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/accumulo/AbstractAccumuloOutputOperator.java
index a4ae1ac..153f88d 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/accumulo/AbstractAccumuloOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/accumulo/AbstractAccumuloOutputOperator.java
@@ -16,19 +16,21 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.accumulo;
+package org.apache.apex.malhar.contrib.accumulo;
 
 import java.util.List;
 
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.data.Mutation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.apex.malhar.lib.db.AbstractAggregateTransactionableStoreOutputOperator;
+
+import com.google.common.collect.Lists;
+
 import com.datatorrent.api.Context.OperatorContext;
 import com.datatorrent.netlet.util.DTThrowable;
-import com.datatorrent.lib.db.AbstractAggregateTransactionableStoreOutputOperator;
-import com.google.common.collect.Lists;
 
 /**
  * Base output operator that stores tuples in Accumulo rows.&nbsp; Subclasses should provide implementation of operationMutation method. <br>
@@ -56,7 +58,8 @@
  *            The tuple type
  * @since 1.0.4
  */
-public abstract class AbstractAccumuloOutputOperator<T> extends AbstractAggregateTransactionableStoreOutputOperator<T, AccumuloWindowStore> {
+public abstract class AbstractAccumuloOutputOperator<T> extends AbstractAggregateTransactionableStoreOutputOperator<T, AccumuloWindowStore>
+{
   private static final transient Logger logger = LoggerFactory.getLogger(AbstractAccumuloOutputOperator.class);
   private final List<T> tuples;
   private transient ProcessingMode mode;
@@ -75,6 +78,7 @@
     tuples = Lists.newArrayList();
     store = new AccumuloWindowStore();
   }
+
   @Override
   public void processTuple(T tuple)
   {
@@ -82,7 +86,8 @@
   }
 
   @Override
-  public void storeAggregate() {
+  public void storeAggregate()
+  {
     try {
       for (T tuple : tuples) {
         Mutation mutation = operationMutation(tuple);
@@ -96,6 +101,7 @@
     }
     tuples.clear();
   }
+
   /**
    *
    * @param t
@@ -106,11 +112,11 @@
   @Override
   public void setup(OperatorContext context)
   {
-    mode=context.getValue(context.PROCESSING_MODE);
-    if(mode==ProcessingMode.EXACTLY_ONCE){
+    mode = context.getValue(context.PROCESSING_MODE);
+    if (mode == ProcessingMode.EXACTLY_ONCE) {
       throw new RuntimeException("This operator only supports atmost once and atleast once processing modes");
     }
-    if(mode==ProcessingMode.AT_MOST_ONCE){
+    if (mode == ProcessingMode.AT_MOST_ONCE) {
       tuples.clear();
     }
     super.setup(context);
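As above, a hypothetical sketch for orientation: storeAggregate() in this hunk calls operationMutation(tuple) per buffered tuple, so a subclass only maps its tuple type to an Accumulo Mutation (assuming operationMutation is the lone overridable hook, as that loop suggests). Row, column family, and qualifier below are placeholders.

package org.apache.apex.malhar.contrib.accumulo;

import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.apex.malhar.lib.util.KeyValPair;
import org.apache.hadoop.io.Text;

// Hypothetical subclass: writes each pair as row = key, cf:col = value.
public class KeyValAccumuloOutputOperator extends AbstractAccumuloOutputOperator<KeyValPair<String, String>>
{
  @Override
  public Mutation operationMutation(KeyValPair<String, String> t)
  {
    Mutation mutation = new Mutation(new Text(t.getKey()));
    // Fixed column family and qualifier, chosen only for illustration.
    mutation.put(new Text("cf"), new Text("col"), new Value(t.getValue().getBytes()));
    return mutation;
  }
}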
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/accumulo/AccumuloStore.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/accumulo/AccumuloStore.java
index 52d273a..2eb4db0 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/accumulo/AccumuloStore.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/accumulo/AccumuloStore.java
@@ -16,10 +16,13 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.accumulo;
+package org.apache.apex.malhar.contrib.accumulo;
 
 import java.io.IOException;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.BatchWriter;
@@ -30,11 +33,9 @@
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import org.apache.apex.malhar.lib.db.Connectable;
 
 import com.datatorrent.netlet.util.DTThrowable;
-import com.datatorrent.lib.db.Connectable;
 
 /**
  * A {@link Connectable} for accumulo that implements the Connectable interface.
@@ -45,7 +46,8 @@
  * @param <T>
  * @since 1.0.4
  */
-public class AccumuloStore implements Connectable {
+public class AccumuloStore implements Connectable
+{
   private static final transient Logger logger = LoggerFactory.getLogger(AccumuloStore.class);
   private String zookeeperHost;
   private String instanceName;
@@ -58,12 +60,13 @@
 
   private long memoryLimit;
   private int numThreads;
-  private static final long DEFAULT_MEMORY=2147483648l;
-  private static final int DEFAULT_THREADS=1;
+  private static final long DEFAULT_MEMORY = 2147483648L;
+  private static final int DEFAULT_THREADS = 1;
 
-  public AccumuloStore(){
-    memoryLimit=DEFAULT_MEMORY;
-    numThreads=DEFAULT_THREADS;
+  public AccumuloStore()
+  {
+    memoryLimit = DEFAULT_MEMORY;
+    numThreads = DEFAULT_THREADS;
   }
 
   /**
@@ -71,7 +74,8 @@
    *
    * @return Connector
    */
-  public Connector getConnector() {
+  public Connector getConnector()
+  {
     return connector;
   }
 
@@ -80,7 +84,8 @@
    *
    * @return TableName
    */
-  public String getTableName() {
+  public String getTableName()
+  {
     return tableName;
   }
 
@@ -89,7 +94,8 @@
    *
    * @param tableName
    */
-  public void setTableName(String tableName) {
+  public void setTableName(String tableName)
+  {
     this.tableName = tableName;
   }
 
@@ -98,7 +104,8 @@
    *
    * @return ZookeeperHost
    */
-  public String getZookeeperHost() {
+  public String getZookeeperHost()
+  {
     return zookeeperHost;
   }
 
@@ -107,7 +114,8 @@
    *
    * @param zookeeperHost
    */
-  public void setZookeeperHost(String zookeeperHost) {
+  public void setZookeeperHost(String zookeeperHost)
+  {
     this.zookeeperHost = zookeeperHost;
   }
 
@@ -116,7 +124,8 @@
    *
    * @return instanceName
    */
-  public String getInstanceName() {
+  public String getInstanceName()
+  {
     return instanceName;
   }
 
@@ -125,7 +134,8 @@
    *
    * @param instanceName
    */
-  public void setInstanceName(String instanceName) {
+  public void setInstanceName(String instanceName)
+  {
     this.instanceName = instanceName;
   }
 
@@ -134,7 +144,8 @@
    *
    * @param userName
    */
-  public void setUserName(String userName) {
+  public void setUserName(String userName)
+  {
     this.userName = userName;
   }
 
@@ -143,23 +154,28 @@
    *
    * @param password
    */
-  public void setPassword(String password) {
+  public void setPassword(String password)
+  {
     this.password = password;
   }
+
   /**
    * setter for memory limit
    *
    * @param memoryLimit
    */
-  public void setMemoryLimit(long memoryLimit) {
+  public void setMemoryLimit(long memoryLimit)
+  {
     this.memoryLimit = memoryLimit;
   }
+
   /**
    * setter for number of writer threads
    *
    * @param numThreads
    */
-  public void setNumThreads(int numThreads) {
+  public void setNumThreads(int numThreads)
+  {
     this.numThreads = numThreads;
   }
 
@@ -168,12 +184,14 @@
    *
    * @return BatchWriter
    */
-  public BatchWriter getBatchwriter() {
+  public BatchWriter getBatchwriter()
+  {
     return batchwriter;
   }
 
   @Override
-  public void connect() throws IOException {
+  public void connect() throws IOException
+  {
     Instance instance = null;
     instance = new ZooKeeperInstance(instanceName, zookeeperHost);
     try {
@@ -198,7 +216,8 @@
   }
 
   @Override
-  public void disconnect() throws IOException {
+  public void disconnect() throws IOException
+  {
     try {
       batchwriter.close();
     } catch (MutationsRejectedException e) {
@@ -208,7 +227,8 @@
   }
 
   @Override
-  public boolean isConnected() {
+  public boolean isConnected()
+  {
     // Not applicable for accumulo
     return false;
   }
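The setters reformatted in this file compose as follows; a minimal usage sketch in which the host, credentials, and table are placeholders, assuming connect() is invoked before the batch writer is used (disconnect() above closes it).

import java.io.IOException;

import org.apache.apex.malhar.contrib.accumulo.AccumuloStore;

public class AccumuloStoreExample
{
  public static void main(String[] args) throws IOException
  {
    AccumuloStore store = new AccumuloStore();
    store.setZookeeperHost("zk-host:2181");    // placeholder coordinates
    store.setInstanceName("accumulo-instance");
    store.setUserName("root");
    store.setPassword("secret");
    store.setTableName("demo_table");
    store.setMemoryLimit(64 * 1024 * 1024L);   // default is 2 GB
    store.setNumThreads(2);                    // default is 1 writer thread

    store.connect();
    try {
      // The batch writer is available once connect() has run.
      System.out.println("batch writer ready: " + store.getBatchwriter());
    } finally {
      store.disconnect();
    }
  }
}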
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/accumulo/AccumuloWindowStore.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/accumulo/AccumuloWindowStore.java
index 9bec031..412ae14 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/accumulo/AccumuloWindowStore.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/accumulo/AccumuloWindowStore.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.accumulo;
+package org.apache.apex.malhar.contrib.accumulo;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -25,6 +25,9 @@
 import java.io.IOException;
 import java.util.Map.Entry;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableNotFoundException;
@@ -33,12 +36,10 @@
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
+import org.apache.apex.malhar.lib.db.TransactionableStore;
 import org.apache.hadoop.io.Text;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.datatorrent.netlet.util.DTThrowable;
-import com.datatorrent.lib.db.TransactionableStore;
 
 /**
  * Provides transactional support by implementing TransactionableStore abstract methods.
@@ -53,7 +54,8 @@
  * @tags accumulo, key value
  * @since 1.0.4
  */
-public class AccumuloWindowStore extends AccumuloStore implements TransactionableStore {
+public class AccumuloWindowStore extends AccumuloStore implements TransactionableStore
+{
   private static final transient Logger logger = LoggerFactory.getLogger(AccumuloWindowStore.class);
   private static final String DEFAULT_ROW_NAME = "AccumuloOperator_row";
   private static final String DEFAULT_COLUMN_FAMILY_NAME = "AccumuloOutputOperator_cf";
@@ -67,7 +69,8 @@
   private transient String lastWindowColumnName;
   private transient byte[] lastWindowColumnBytes;
 
-  public AccumuloWindowStore() {
+  public AccumuloWindowStore()
+  {
     rowName = DEFAULT_ROW_NAME;
     columnFamilyName = DEFAULT_COLUMN_FAMILY_NAME;
     lastWindowColumnName = DEFAULT_LAST_WINDOW_PREFIX_COLUMN_NAME;
@@ -78,54 +81,64 @@
   * the values are stored as byte arrays. This method converts strings to byte
   * arrays, using a util class in the hbase library to do so.
    */
-  private void constructKeys() {
+  private void constructKeys()
+  {
     rowBytes = rowName.getBytes();
     columnFamilyBytes = columnFamilyName.getBytes();
   }
 
-  public String getRowName() {
+  public String getRowName()
+  {
     return rowName;
   }
 
-  public void setRowName(String rowName) {
+  public void setRowName(String rowName)
+  {
     this.rowName = rowName;
     constructKeys();
   }
 
-  public String getColumnFamilyName() {
+  public String getColumnFamilyName()
+  {
     return columnFamilyName;
   }
 
-  public void setColumnFamilyName(String columnFamilyName) {
+  public void setColumnFamilyName(String columnFamilyName)
+  {
     this.columnFamilyName = columnFamilyName;
     constructKeys();
   }
 
   @Override
-  public void beginTransaction() {
+  public void beginTransaction()
+  {
     // accumulo does not support transactions
   }
 
   @Override
-  public void commitTransaction() {
+  public void commitTransaction()
+  {
     // accumulo does not support transactions
 
   }
 
   @Override
-  public void rollbackTransaction() {
+  public void rollbackTransaction()
+  {
     // accumulo does not support transactions
 
   }
 
   @Override
-  public boolean isInTransaction() {
+  public boolean isInTransaction()
+  {
     // accumulo does not support transactions
     return false;
   }
 
   @Override
-  public long getCommittedWindowId(String appId, int operatorId) {
+  public long getCommittedWindowId(String appId, int operatorId)
+  {
     byte[] value = null;
     Authorizations auths = new Authorizations();
     Scanner scan = null;
@@ -150,7 +163,8 @@
   }
 
   @Override
-  public void storeCommittedWindowId(String appId, int operatorId,long windowId) {
+  public void storeCommittedWindowId(String appId, int operatorId, long windowId)
+  {
     byte[] WindowIdBytes = toBytes(windowId);
     String columnKey = appId + "_" + operatorId + "_" + lastWindowColumnName;
     lastWindowColumnBytes = columnKey.getBytes();
@@ -166,19 +180,19 @@
   }
 
   @Override
-  public void removeCommittedWindowId(String appId, int operatorId) {
+  public void removeCommittedWindowId(String appId, int operatorId)
+  {
     // accumulo does not support transactions
-
   }
 
   public static byte[] toBytes(long l)
   {
-    ByteArrayOutputStream baos=new ByteArrayOutputStream(Long.SIZE/8);
-    DataOutputStream dos=new DataOutputStream(baos);
-    byte[] result=null;
+    ByteArrayOutputStream baos = new ByteArrayOutputStream(Long.SIZE / 8);
+    DataOutputStream dos = new DataOutputStream(baos);
+    byte[] result = null;
     try {
       dos.writeLong(l);
-      result=baos.toByteArray();
+      result = baos.toByteArray();
       dos.close();
     } catch (IOException e) {
       logger.error("error converting to byte array");
@@ -187,11 +201,11 @@
     return result;
   }
 
-
-  public static long toLong(byte[] b){
-    ByteArrayInputStream baos=new ByteArrayInputStream(b);
-    DataInputStream dos=new DataInputStream(baos);
-    long result=0;
+  public static long toLong(byte[] b)
+  {
+    ByteArrayInputStream baos = new ByteArrayInputStream(b);
+    DataInputStream dos = new DataInputStream(baos);
+    long result = 0;
     try {
       result = dos.readLong();
       dos.close();
@@ -202,5 +216,4 @@
     return result;
   }
 
-
 }
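The toBytes()/toLong() helpers reformatted above are exact inverses (one long written and read through Data{Output,Input}Stream), which storeCommittedWindowId() depends on. A quick round-trip check; the window id value is an arbitrary example:

import org.apache.apex.malhar.contrib.accumulo.AccumuloWindowStore;

public class WindowIdBytesExample
{
  public static void main(String[] args)
  {
    long windowId = 6215869000000001L;         // arbitrary example id
    byte[] encoded = AccumuloWindowStore.toBytes(windowId);
    long decoded = AccumuloWindowStore.toLong(encoded);
    System.out.println(decoded == windowId);   // prints: true
  }
}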
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/accumulo/package-info.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/accumulo/package-info.java
index cbac406..508c16e 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/accumulo/package-info.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/accumulo/package-info.java
@@ -17,4 +17,4 @@
  * under the License.
  */
 @org.apache.hadoop.classification.InterfaceStability.Evolving
-package com.datatorrent.contrib.accumulo;
+package org.apache.apex.malhar.contrib.accumulo;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AbstractAerospikeGetOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AbstractAerospikeGetOperator.java
index 9f7469f..3cc0d69 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AbstractAerospikeGetOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AbstractAerospikeGetOperator.java
@@ -16,31 +16,34 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.aerospike;
+package org.apache.apex.malhar.contrib.aerospike;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.apex.malhar.lib.db.AbstractStoreInputOperator;
 
 import com.aerospike.client.Record;
 import com.aerospike.client.query.RecordSet;
 import com.aerospike.client.query.Statement;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.netlet.util.DTThrowable;
-import com.datatorrent.lib.db.AbstractStoreInputOperator;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
- * Base input adapter, which reads data from persistence database through its API and writes into output port(s).&nsbsp;
- * Subclasses should provide the implementation of getting the tuples and querying to retrieve data.
+ * Base input adapter, which reads data from a persistence database through
+ * its API and writes into output port(s). Subclasses should provide the
+ * implementation of getting the tuples and querying to retrieve data.
  * <p>
- * This is an abstract class. Sub-classes need to implement {@link #queryToRetrieveData()} and {@link #getTuple(Record)}.
- * </p>
+ * This is an abstract class. Sub-classes need to implement
+ * {@link #queryToRetrieveData()} and {@link #getTuple(Record)}.
+ *
  * @displayName Abstract Aerospike Get
  * @category Input
  * @tags get
  * @since 1.0.4
  */
-public abstract class AbstractAerospikeGetOperator<T> extends AbstractStoreInputOperator<T, AerospikeStore> {
-
+public abstract class AbstractAerospikeGetOperator<T> extends AbstractStoreInputOperator<T, AerospikeStore>
+{
   private static final Logger logger = LoggerFactory.getLogger(AbstractAerospikeGetOperator.class);
 
   /**
@@ -69,20 +72,19 @@
   * It then converts each row into a tuple and emits it on the output port.
    */
   @Override
-  public void emitTuples() {
-
+  public void emitTuples()
+  {
     Statement query = queryToRetrieveData();
     logger.debug(String.format("select statement: %s", query.toString()));
     RecordSet rs;
     try {
       rs = store.getClient().query(null, query);
-      while(rs.next()){
+      while (rs.next()) {
         Record rec = rs.getRecord();
         T tuple = getTuple(rec);
         outputPort.emit(tuple);
       }
-    }
-    catch (Exception ex) {
+    } catch (Exception ex) {
       store.disconnect();
       DTThrowable.rethrow(ex);
     }
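The reflowed javadoc above names the two hooks a subclass implements, queryToRetrieveData() and getTuple(Record). A hypothetical minimal subclass; the namespace, set, and bin names are invented for illustration.

package org.apache.apex.malhar.contrib.aerospike;

import com.aerospike.client.Record;
import com.aerospike.client.query.Statement;

// Hypothetical subclass: scans one set and emits a single string bin.
public class StringAerospikeGetOperator extends AbstractAerospikeGetOperator<String>
{
  @Override
  public Statement queryToRetrieveData()
  {
    // Namespace and set are placeholders.
    Statement statement = new Statement();
    statement.setNamespace("test");
    statement.setSetName("demo");
    return statement;
  }

  @Override
  public String getTuple(Record record)
  {
    // Pull the "name" bin out of each record.
    return record.getString("name");
  }
}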
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AbstractAerospikeNonTransactionalPutOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AbstractAerospikeNonTransactionalPutOperator.java
index a629078..ca14a37 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AbstractAerospikeNonTransactionalPutOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AbstractAerospikeNonTransactionalPutOperator.java
@@ -16,32 +16,36 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.aerospike;
+package org.apache.apex.malhar.contrib.aerospike;
 
 import java.util.List;
 
+import org.apache.apex.malhar.lib.db.AbstractStoreOutputOperator;
+
 import com.aerospike.client.AerospikeException;
 import com.aerospike.client.Bin;
 import com.aerospike.client.Key;
 import com.google.common.collect.Lists;
 
-import com.datatorrent.lib.db.AbstractStoreOutputOperator;
-
 
 /**
- * <p>
- * Generic base output adaptor which writes tuples as they come without providing any transactional support.&nbsp; Subclasses should provide implementation for getting updated bins.
- * </p>
+ * Generic base output adaptor which writes tuples as they come without
+ * providing any transactional support. Subclasses should provide an
+ * implementation for getting the updated bins.
+ *
  * @displayName Abstract Aerospike Non Transactional Put
  * @category Output
  * @tags put, non transactional
  * @param <T> type of tuple
  * @since 1.0.4
  */
-public abstract class AbstractAerospikeNonTransactionalPutOperator<T> extends AbstractStoreOutputOperator<T,AerospikeStore> {
+public abstract class AbstractAerospikeNonTransactionalPutOperator<T>
+    extends AbstractStoreOutputOperator<T, AerospikeStore>
+{
+  private final transient List<Bin> bins;
 
-  private transient final List<Bin> bins;
-  public AbstractAerospikeNonTransactionalPutOperator() {
+  public AbstractAerospikeNonTransactionalPutOperator()
+  {
     super();
     bins = Lists.newArrayList();
   }
@@ -58,8 +62,8 @@
   protected abstract Key getUpdatedBins(T tuple, List<Bin> bins) throws AerospikeException;
 
   @Override
-  public void processTuple(T tuple) {
-
+  public void processTuple(T tuple)
+  {
     Key key;
     Bin[] binsArray;
     try {
@@ -68,11 +72,9 @@
       binsArray = bins.toArray(binsArray);
       store.getClient().put(null, key, binsArray);
       bins.clear();
-    }
-    catch (AerospikeException e) {
+    } catch (AerospikeException e) {
       throw new RuntimeException(e);
     }
-
   }
 
 }
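
Reviewer note: the getUpdatedBins() contract (append this tuple's bins to the reusable list, return the record key) reads clearest in a small subclass. A sketch under assumed names; namespace and set are examples:

```java
import java.util.List;
import java.util.Map;

import com.aerospike.client.AerospikeException;
import com.aerospike.client.Bin;
import com.aerospike.client.Key;

// Sketch: one bin per map entry; "test"/"tuples" are example namespace/set names.
public class AerospikeMapPutOperator extends AbstractAerospikeNonTransactionalPutOperator<Map<String, String>>
{
  @Override
  protected Key getUpdatedBins(Map<String, String> tuple, List<Bin> bins) throws AerospikeException
  {
    for (Map.Entry<String, String> e : tuple.entrySet()) {
      bins.add(new Bin(e.getKey(), e.getValue())); // appended to the list processTuple() clears
    }
    return new Key("test", "tuples", tuple.hashCode()); // record key for this tuple
  }
}
```
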
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AbstractAerospikeTransactionalPutOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AbstractAerospikeTransactionalPutOperator.java
index 6db8896..9b8ba59 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AbstractAerospikeTransactionalPutOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AbstractAerospikeTransactionalPutOperator.java
@@ -16,39 +16,42 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.aerospike;
+package org.apache.apex.malhar.contrib.aerospike;
 
 import java.util.Collection;
 import java.util.List;
 
+import org.apache.apex.malhar.lib.db.AbstractBatchTransactionableStoreOutputOperator;
+
 import com.aerospike.client.AerospikeException;
 import com.aerospike.client.Bin;
 import com.aerospike.client.Key;
 import com.google.common.collect.Lists;
 
-import com.datatorrent.lib.db.AbstractBatchTransactionableStoreOutputOperator;
-
 /**
+ * Generic base adaptor which creates a transaction at the start of window.
+ * Subclasses should provide an implementation for getting updated bins.
  * <p>
- * Generic base adaptor which creates a transaction at the start of window.&nbsp; Subclasses should provide implementation for getting updated bins. <br/>
+ * Executes all the put updates and closes the transaction at the end of the
+ * window. The tuples in a window are stored in a check-pointed collection
+ * which is cleared in endWindow(); this is needed for recovery. The operator
+ * writes each tuple to the database at least once, which is why the
+ * transaction is committed in the end window call only after all the
+ * updates have been executed.
  * </p>
- * <p>
- * Executes all the put updates and closes the transaction at the end of the window.
- * The tuples in a window are stored in check-pointed collection which is cleared in the endWindow().
- * This is needed for the recovery. The operator writes a tuple at least once in the database, which is why
- * only when all the updates are executed, the transaction is committed in the end window call.
- * </p>
+ *
  * @displayName Abstract Aerospike Transactional Put
  * @category Output
  * @tags put, transactional
  * @param <T> type of tuple
  * @since 1.0.4
  */
-public abstract class AbstractAerospikeTransactionalPutOperator<T> extends AbstractBatchTransactionableStoreOutputOperator<T, AerospikeTransactionalStore> {
+public abstract class AbstractAerospikeTransactionalPutOperator<T>
+    extends AbstractBatchTransactionableStoreOutputOperator<T, AerospikeTransactionalStore>
+{
+  private final transient List<Bin> bins;
 
-  private transient final List<Bin> bins;
-
-  public AbstractAerospikeTransactionalPutOperator() {
+  public AbstractAerospikeTransactionalPutOperator()
+  {
     super();
     bins = Lists.newArrayList();
   }
@@ -70,7 +73,7 @@
     Key key;
     Bin[] binsArray;
     try {
-      for(T tuple: tuples) {
+      for (T tuple: tuples) {
         key = getUpdatedBins(tuple,bins);
         binsArray = new Bin[bins.size()];
         binsArray = bins.toArray(binsArray);
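
Reviewer note: since the operator guarantees at-least-once writes and commits the window id only after every put in the window has executed, it must be wired to the transactional store rather than the plain AerospikeStore. A configuration sketch; MyAerospikePutOperator is a hypothetical subclass, setStore() is the setter the store-output base classes expose, and host/port/namespace are examples:

```java
// Sketch: attaching the transactional store to a (hypothetical) put subclass.
MyAerospikePutOperator put = dag.addOperator("aerospikePut", new MyAerospikePutOperator());
AerospikeTransactionalStore store = new AerospikeTransactionalStore();
store.setNode("localhost");
store.setPort(3000);
store.setNamespace("test"); // also hosts the dt_* window-tracking bins
put.setStore(store);
```
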
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AerospikePOJONonTransactionalPutOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AerospikePOJONonTransactionalPutOperator.java
index f69e433..49580d0 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AerospikePOJONonTransactionalPutOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AerospikePOJONonTransactionalPutOperator.java
@@ -16,22 +16,22 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.aerospike;
+package org.apache.apex.malhar.contrib.aerospike;
 
 import java.util.ArrayList;
 import java.util.List;
 
 import javax.validation.constraints.NotNull;
 
-import com.aerospike.client.Bin;
-import com.aerospike.client.Key;
-
-import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.datatorrent.lib.util.PojoUtils;
-import com.datatorrent.lib.util.PojoUtils.Getter;
+import org.apache.apex.malhar.lib.util.PojoUtils;
+import org.apache.apex.malhar.lib.util.PojoUtils.Getter;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+
+import com.aerospike.client.Bin;
+import com.aerospike.client.Key;
 
 /**
  * <p>
@@ -47,8 +47,8 @@
 @Evolving
 public class AerospikePOJONonTransactionalPutOperator extends AbstractAerospikeNonTransactionalPutOperator<Object>
 {
-  private static transient final Logger LOG
-    = LoggerFactory.getLogger(AerospikePOJONonTransactionalPutOperator.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(AerospikePOJONonTransactionalPutOperator.class);
 
   // Two element list; first retrieves the record key and second the list of bins in this tuple
   @NotNull
@@ -91,7 +91,7 @@
     }
     Key key = keyGetter.get(tuple);
     List<Bin> binList = binsGetter.get(tuple);
-    if ( ! (null == binList || binList.isEmpty()) ) {
+    if (!(null == binList || binList.isEmpty())) {
       list.addAll(binList);
     }
     return key;
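
Reviewer note: both POJO put operators (this one and the transactional variant below) are configured the same way, with two Java expressions that PojoUtils compiles into getters against the incoming POJO. A sketch, assuming the usual bean setter for the @NotNull expressions field:

```java
// Sketch: the first expression must yield a com.aerospike.client.Key, the
// second a List<Bin>; getKey()/getBins() are hypothetical POJO methods.
AerospikePOJONonTransactionalPutOperator put = new AerospikePOJONonTransactionalPutOperator();
ArrayList<String> expressions = new ArrayList<>();
expressions.add("getKey()");
expressions.add("getBins()");
put.setExpressions(expressions); // assumed setter for the expressions field
```
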
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AerospikePOJOTransactionalPutOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AerospikePOJOTransactionalPutOperator.java
index f59b893..7add642 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AerospikePOJOTransactionalPutOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AerospikePOJOTransactionalPutOperator.java
@@ -16,27 +16,24 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.aerospike;
+package org.apache.apex.malhar.contrib.aerospike;
 
 import java.util.ArrayList;
 import java.util.List;
 
 import javax.validation.constraints.NotNull;
 
+import org.apache.apex.malhar.lib.util.PojoUtils;
+import org.apache.apex.malhar.lib.util.PojoUtils.Getter;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+
 import com.aerospike.client.Bin;
 import com.aerospike.client.Key;
 
-import org.apache.hadoop.classification.InterfaceStability.Evolving;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.datatorrent.lib.util.PojoUtils;
-import com.datatorrent.lib.util.PojoUtils.Getter;
-
 /**
  * <p>
  * A generic implementation of
- * {@link com.datatorrent.contrib.aerospike.AbstractAerospikeTransactionalPutOperator} which can
+ * {@link org.apache.apex.malhar.contrib.aerospike.AbstractAerospikeTransactionalPutOperator} which can
  * take a POJO.
  * </p>
  * @displayName Aerospike Transactional Put Operator
@@ -45,12 +42,8 @@
  * @since 2.1.0
  */
 @Evolving
-public class AerospikePOJOTransactionalPutOperator
-  extends AbstractAerospikeTransactionalPutOperator<Object>
+public class AerospikePOJOTransactionalPutOperator extends AbstractAerospikeTransactionalPutOperator<Object>
 {
-  private static transient final Logger LOG
-    = LoggerFactory.getLogger(AerospikePOJOTransactionalPutOperator.class);
-
   // Two element list; first retrieves the record key and second the list of bins in this tuple
   @NotNull
   private ArrayList<String> expressions;
@@ -92,11 +85,10 @@
     }
     Key key = keyGetter.get(tuple);
     List<Bin> binList = binsGetter.get(tuple);
-    if ( ! (null == binList || binList.isEmpty()) ) {
+    if (!(null == binList || binList.isEmpty())) {
       list.addAll(binList);
     }
     return key;
   }
 
-
 }
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AerospikeStore.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AerospikeStore.java
index 10809e1..2ffda24 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AerospikeStore.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AerospikeStore.java
@@ -16,15 +16,18 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.aerospike;
+package org.apache.apex.malhar.contrib.aerospike;
 
 import javax.validation.constraints.NotNull;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.apex.malhar.lib.db.Connectable;
+
 import com.aerospike.client.AerospikeClient;
 import com.aerospike.client.AerospikeException;
 import com.datatorrent.netlet.util.DTThrowable;
-import com.datatorrent.lib.db.Connectable;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * A {@link Connectable} that uses aerospike to connect to stores and implements the Connectable interface.
@@ -34,8 +37,8 @@
  * @tags store
  * @since 1.0.4
  */
-public class AerospikeStore implements Connectable {
-
+public class AerospikeStore implements Connectable
+{
   protected static final Logger logger = LoggerFactory.getLogger(AerospikeStore.class);
   private String userName;
   private String password;
@@ -49,7 +52,8 @@
    *
    * @param userName user name.
    */
-  public void setUserName(String userName) {
+  public void setUserName(String userName)
+  {
     this.userName = userName;
   }
 
@@ -58,7 +62,8 @@
    *
    * @param password password
    */
-  public void setPassword(String password) {
+  public void setPassword(String password)
+  {
     this.password = password;
   }
 
@@ -68,7 +73,8 @@
    * @return The node
    */
   @NotNull
-  public String getNode() {
+  public String getNode()
+  {
     return node;
   }
 
@@ -77,7 +83,8 @@
    *
    * @param node node
    */
-  public void setNode(@NotNull String node) {
+  public void setNode(@NotNull String node)
+  {
     this.node = node;
   }
 
@@ -86,7 +93,8 @@
    *
    * @return The client
    */
-  public AerospikeClient getClient() {
+  public AerospikeClient getClient()
+  {
     return client;
   }
 
@@ -95,7 +103,8 @@
    *
    * @param port port
    */
-  public void setPort(int port) {
+  public void setPort(int port)
+  {
     this.port = port;
   }
 
@@ -103,15 +112,14 @@
    * Create connection with database.
    */
   @Override
-  public void connect() {
+  public void connect()
+  {
     try {
       client = new AerospikeClient(node, port);
       logger.debug("Aerospike connection Success");
-    }
-    catch (AerospikeException ex) {
+    } catch (AerospikeException ex) {
       throw new RuntimeException("closing database resource", ex);
-    }
-    catch (Throwable t) {
+    } catch (Throwable t) {
       DTThrowable.rethrow(t);
     }
   }
@@ -120,12 +128,14 @@
    * Close connection.
    */
   @Override
-  public void disconnect() {
+  public void disconnect()
+  {
     client.close();
   }
 
   @Override
-  public boolean isConnected() {
+  public boolean isConnected()
+  {
     return !client.isConnected();
   }
 }
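
Reviewer note: this hunk only reformats AerospikeStore, but isConnected() returning !client.isConnected() still reads as inverted and may deserve a follow-up check. For reference, standalone use of the store looks like this (host and port are examples):

```java
// Sketch: connect, use the client, disconnect; values are examples.
AerospikeStore store = new AerospikeStore();
store.setNode("localhost");
store.setPort(3000);
store.connect();                             // wraps AerospikeException in RuntimeException
AerospikeClient client = store.getClient();  // com.aerospike.client.AerospikeClient
// ... issue puts/queries through the client ...
store.disconnect();
```
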
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AerospikeTransactionalStore.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AerospikeTransactionalStore.java
index 8128a17..8f898e6 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AerospikeTransactionalStore.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/AerospikeTransactionalStore.java
@@ -16,10 +16,12 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.aerospike;
+package org.apache.apex.malhar.contrib.aerospike;
 
 import javax.annotation.Nonnull;
 
+import org.apache.apex.malhar.lib.db.TransactionableStore;
+
 import com.aerospike.client.AerospikeException;
 import com.aerospike.client.Bin;
 import com.aerospike.client.Key;
@@ -28,7 +30,6 @@
 import com.aerospike.client.query.RecordSet;
 import com.aerospike.client.query.Statement;
 import com.aerospike.client.task.IndexTask;
-import com.datatorrent.lib.db.TransactionableStore;
 
 /**
  * <p>Provides transaction support to the operators by implementing TransactionableStore abstract methods. </p>
@@ -38,8 +39,8 @@
  * @tags store, transactional
  * @since 1.0.4
  */
-public class AerospikeTransactionalStore extends AerospikeStore implements TransactionableStore {
-
+public class AerospikeTransactionalStore extends AerospikeStore implements TransactionableStore
+{
   public static String DEFAULT_APP_ID_COL = "dt_app_id";
   public static String DEFAULT_OPERATOR_ID_COL = "dt_operator_id";
   public static String DEFAULT_WINDOW_COL = "dt_window";
@@ -59,8 +60,8 @@
   private transient boolean inTransaction;
   private transient Statement lastWindowFetchCommand;
 
-  public AerospikeTransactionalStore() {
-
+  public AerospikeTransactionalStore()
+  {
     super();
     metaSet = DEFAULT_META_SET;
     metaTableAppIdColumn = DEFAULT_APP_ID_COL;
@@ -75,8 +76,8 @@
    *
    * @param metaSet meta set name.
    */
-  public void setMetaSet(@Nonnull String metaSet) {
-
+  public void setMetaSet(@Nonnull String metaSet)
+  {
     this.metaSet = metaSet;
   }
 
@@ -86,8 +87,8 @@
    *
    * @param appIdColumn application id column name.
    */
-  public void setMetaTableAppIdColumn(@Nonnull String appIdColumn) {
-
+  public void setMetaTableAppIdColumn(@Nonnull String appIdColumn)
+  {
     this.metaTableAppIdColumn = appIdColumn;
   }
 
@@ -97,8 +98,8 @@
    *
    * @param operatorIdColumn operator id column name.
    */
-  public void setMetaTableOperatorIdColumn(@Nonnull String operatorIdColumn) {
-
+  public void setMetaTableOperatorIdColumn(@Nonnull String operatorIdColumn)
+  {
     this.metaTableOperatorIdColumn = operatorIdColumn;
   }
 
@@ -108,8 +109,8 @@
    *
    * @param windowColumn window column name.
    */
-  public void setMetaTableWindowColumn(@Nonnull String windowColumn) {
-
+  public void setMetaTableWindowColumn(@Nonnull String windowColumn)
+  {
     this.metaTableWindowColumn = windowColumn;
   }
 
@@ -118,14 +119,14 @@
    *
    * @param namespace namespace.
    */
-  public void setNamespace(@Nonnull String namespace) {
-
+  public void setNamespace(@Nonnull String namespace)
+  {
     this.namespace = namespace;
   }
 
   @Override
-  public void connect() {
-
+  public void connect()
+  {
     super.connect();
     createIndexes();
     try {
@@ -133,44 +134,43 @@
       lastWindowFetchCommand.setNamespace(namespace);
       lastWindowFetchCommand.setSetName(metaSet);
       lastWindowFetchCommand.setBinNames(metaTableWindowColumn);
-    }
-    catch (Exception e) {
+    } catch (Exception e) {
       throw new RuntimeException(e);
     }
   }
 
   @Override
-  public void disconnect() {
-
+  public void disconnect()
+  {
     super.disconnect();
   }
 
   @Override
-  public void beginTransaction() {
-
+  public void beginTransaction()
+  {
     inTransaction = true;
   }
 
   @Override
-  public void commitTransaction() {
-
+  public void commitTransaction()
+  {
     inTransaction = false;
   }
 
   @Override
-  public void rollbackTransaction() {
-
+  public void rollbackTransaction()
+  {
     inTransaction = false;
   }
 
   @Override
-  public boolean isInTransaction() {
-
+  public boolean isInTransaction()
+  {
     return inTransaction;
   }
 
-  private void createIndexes() {
-
+  private void createIndexes()
+  {
     IndexTask task;
     try {
       task = client.createIndex(null, namespace, metaSet,
@@ -186,26 +186,25 @@
   }
 
   @Override
-  public long getCommittedWindowId(String appId, int operatorId) {
-
+  public long getCommittedWindowId(String appId, int operatorId)
+  {
     try {
       lastWindowFetchCommand.setFilters(Filter.equal(metaTableOperatorIdColumn, operatorId));
       lastWindowFetchCommand.setFilters(Filter.equal(metaTableAppIdColumn, appId));
       long lastWindow = -1;
       RecordSet recordSet = client.query(null, lastWindowFetchCommand);
-      while(recordSet.next()) {
+      while (recordSet.next()) {
         lastWindow = Long.parseLong(recordSet.getRecord().getValue(metaTableWindowColumn).toString());
       }
       return lastWindow;
-    }
-    catch (AerospikeException ex) {
+    } catch (AerospikeException ex) {
       throw new RuntimeException(ex);
     }
   }
 
   @Override
-  public void storeCommittedWindowId(String appId, int operatorId, long windowId) {
-
+  public void storeCommittedWindowId(String appId, int operatorId, long windowId)
+  {
     try {
       String keyString = appId + String.valueOf(operatorId);
       Key key = new Key(namespace,metaSet,keyString.hashCode());
@@ -213,21 +212,19 @@
       Bin bin2 = new Bin(metaTableOperatorIdColumn,operatorId);
       Bin bin3 = new Bin(metaTableWindowColumn,windowId);
       client.put(null, key, bin1,bin2,bin3);
-    }
-    catch (AerospikeException e) {
+    } catch (AerospikeException e) {
       throw new RuntimeException(e);
     }
   }
 
   @Override
-  public void removeCommittedWindowId(String appId, int operatorId) {
-
+  public void removeCommittedWindowId(String appId, int operatorId)
+  {
     try {
       String keyString = appId + String.valueOf(operatorId);
       Key key = new Key(namespace,metaSet,keyString.hashCode());
       client.delete(null, key);
-    }
-    catch (AerospikeException e) {
+    } catch (AerospikeException e) {
       throw new RuntimeException(e);
     }
   }
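
Reviewer note: the dt_* bins above are what make exactly-once output possible on top of at-least-once writes. A sketch of the bookkeeping; the store is configured and connected as in the previous sketch, and the app id, operator id and window id are example values:

```java
// Sketch: skip windows that were already committed before a restart.
long windowId = 42L;                                       // example window id
long committed = store.getCommittedWindowId("app_123", 1); // -1 when none stored
if (windowId > committed) {
  // this window did not complete before the failure: (re)process it, then
  store.storeCommittedWindowId("app_123", 1, windowId);
}
```
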
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/package-info.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/package-info.java
index 97e878c..8a37041 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/package-info.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/aerospike/package-info.java
@@ -17,4 +17,4 @@
  * under the License.
  */
 @org.apache.hadoop.classification.InterfaceStability.Evolving
-package com.datatorrent.contrib.aerospike;
+package org.apache.apex.malhar.contrib.aerospike;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/ApacheLogInputGenerator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/ApacheLogInputGenerator.java
index ecd57aa..7f44cc9 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/ApacheLogInputGenerator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/ApacheLogInputGenerator.java
@@ -16,18 +16,30 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.apachelog;
+package org.apache.apex.malhar.contrib.apachelog;
 
-import com.datatorrent.api.*;
-import com.datatorrent.api.Context.OperatorContext;
-import java.io.*;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
 import java.text.SimpleDateFormat;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.Random;
+import java.util.StringTokenizer;
 import java.util.concurrent.ArrayBlockingQueue;
 import javax.validation.constraints.NotNull;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.datatorrent.api.Context.OperatorContext;
+import com.datatorrent.api.DefaultOutputPort;
+import com.datatorrent.api.InputOperator;
+import com.datatorrent.api.Operator;
+
 /**
  * An implementation of input operator and activation listener that simulates the apache logs.
  * <p>
@@ -38,7 +50,7 @@
  */
 public class ApacheLogInputGenerator implements InputOperator, Operator.ActivationListener<OperatorContext>
 {
-  private final static String delimiter = ";";
+  private static final String delimiter = ";";
 
   private transient Random random;
   private transient int ipAddressCount;
@@ -99,13 +111,12 @@
 
   private List<String> readLines(String file) throws IOException
   {
-    List<String> lines = new ArrayList<String>();
+    List<String> lines = new ArrayList<>();
     InputStream in;
     File f = new File(file);
     if (f.exists()) {
       in = new FileInputStream(f);
-    }
-    else {
+    } else {
       in = getClass().getResourceAsStream(file);
     }
     BufferedReader br = new BufferedReader(new InputStreamReader(in));
@@ -114,8 +125,7 @@
       while ((line = br.readLine()) != null) {
         lines.add(line);
       }
-    }
-    finally {
+    } finally {
       br.close();
     }
     return lines;
@@ -150,8 +160,7 @@
         bytes.add(Integer.parseInt(token.nextToken().trim()));
         status.add(Integer.parseInt(token.nextToken().trim()));
       }
-    }
-    catch (IOException e) {
+    } catch (IOException e) {
       throw new RuntimeException(e);
     }
 
@@ -214,8 +223,7 @@
           if (maxDelay > 0) {
             try {
               Thread.sleep(random.nextInt(maxDelay));
-            }
-            catch (InterruptedException e) {
+            } catch (InterruptedException e) {
               return;
             }
           }
@@ -232,8 +240,8 @@
     try {
       thread.interrupt();
       thread.join();
-    }
-    catch (InterruptedException ex) {
+    } catch (InterruptedException ex) {
+      // ignore
     }
   }
 
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/ApplicationLocalLog.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/ApplicationLocalLog.java
index 23b4a62..b8d29d4 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/ApplicationLocalLog.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/ApplicationLocalLog.java
@@ -16,15 +16,16 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.apachelog;
+package org.apache.apex.malhar.contrib.apachelog;
+
+import org.apache.apex.malhar.lib.io.ConsoleOutputOperator;
+import org.apache.apex.malhar.lib.io.fs.TailFsInputOperator;
+import org.apache.apex.malhar.lib.logs.ApacheLogParseMapOutputOperator;
+import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.DAG;
 import com.datatorrent.api.DAG.Locality;
 import com.datatorrent.api.StreamingApplication;
-import com.datatorrent.lib.io.ConsoleOutputOperator;
-import com.datatorrent.lib.io.fs.TailFsInputOperator;
-import com.datatorrent.lib.logs.ApacheLogParseMapOutputOperator;
-import org.apache.hadoop.conf.Configuration;
 
 /**
  * An implementation of Streaming Application that reads from a local apache log file on the fly and dumps the parsed data to output.
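
Reviewer note: per the reshuffled imports, the application tails a local access log, parses each line into a map, and prints it. A populateDAG() sketch; stream names, the log path and the parser port names are illustrative assumptions:

```java
// Sketch: tail -> parse -> console; paths and names are examples.
@Override
public void populateDAG(DAG dag, Configuration conf)
{
  TailFsInputOperator tail = dag.addOperator("tail", new TailFsInputOperator());
  tail.setFilePath("/var/log/apache2/access.log"); // assumed setter for filePath
  ApacheLogParseMapOutputOperator parser =
      dag.addOperator("parser", new ApacheLogParseMapOutputOperator());
  ConsoleOutputOperator console = dag.addOperator("console", new ConsoleOutputOperator());
  dag.addStream("tail-parse", tail.output, parser.data);
  dag.addStream("parse-print", parser.output, console.input);
}
```
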
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/ApplicationLogGenerator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/ApplicationLogGenerator.java
index d624e08..0eef6dc 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/ApplicationLogGenerator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/ApplicationLogGenerator.java
@@ -16,16 +16,16 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.apachelog;
+package org.apache.apex.malhar.contrib.apachelog;
 
-import com.datatorrent.api.*;
-import com.datatorrent.api.DAG.Locality;
-import com.datatorrent.lib.io.ConsoleOutputOperator;
-import com.datatorrent.lib.logs.ApacheLogParseMapOutputOperator;
-import java.util.ArrayList;
-import java.util.List;
+import org.apache.apex.malhar.lib.io.ConsoleOutputOperator;
+import org.apache.apex.malhar.lib.logs.ApacheLogParseMapOutputOperator;
 import org.apache.hadoop.conf.Configuration;
 
+import com.datatorrent.api.DAG;
+import com.datatorrent.api.DAG.Locality;
+import com.datatorrent.api.StreamingApplication;
+
 /**
 * An implementation of Streaming Application that generates an apache log file on the fly and dumps the parsed data to output.
  *
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/GeoIPExtractor.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/GeoIPExtractor.java
index bd226e3..a58c39e 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/GeoIPExtractor.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/GeoIPExtractor.java
@@ -16,19 +16,19 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.apachelog;
+package org.apache.apex.malhar.contrib.apachelog;
 
+import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
-import java.io.IOException;
 import javax.validation.constraints.NotNull;
 
-import com.datatorrent.lib.logs.InformationExtractor;
-import com.maxmind.geoip.Location;
-import com.maxmind.geoip.LookupService;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.apache.apex.malhar.lib.logs.InformationExtractor;
+
+import com.maxmind.geoip.Location;
+import com.maxmind.geoip.LookupService;
 
 /**
  * An implementation of InformationExtractor that extracts Geo information from an IP address using the MaxMind API.
@@ -67,8 +67,7 @@
   {
     try {
       reader = new LookupService(databasePath, LookupService.GEOIP_MEMORY_CACHE | LookupService.GEOIP_CHECK_CACHE);
-    }
-    catch (IOException ex) {
+    } catch (IOException ex) {
       throw new RuntimeException(ex);
     }
   }
@@ -90,8 +89,7 @@
         m.put("ipRegion", location.region);
         m.put("ipCity", location.city);
       }
-    }
-    catch (Exception ex) {
+    } catch (Exception ex) {
       LOG.error("Caught exception when looking up Geo IP for {}:", value, ex);
     }
     return m;
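
Reviewer note: setup() opens the MaxMind LookupService with an in-memory cache, and extractInformation() degrades gracefully (logs and returns a partial map) on lookup failures. A standalone-usage sketch; the database path is an example and the databasePath setter is assumed:

```java
// Sketch: resolve an IP to country/region/city; path and IP are examples.
GeoIPExtractor geo = new GeoIPExtractor();
geo.setDatabasePath("/tmp/GeoLiteCity.dat"); // assumed setter; MaxMind .dat file
geo.setup();
Map<String, Object> m = geo.extractInformation("74.125.0.1");
// on success m carries ipCountry / ipRegion / ipCity entries
```
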
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/TimestampExtractor.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/TimestampExtractor.java
index bb87f03..1b6b4b6 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/TimestampExtractor.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/TimestampExtractor.java
@@ -16,20 +16,20 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.apachelog;
+package org.apache.apex.malhar.contrib.apachelog;
 
-import java.util.Map;
-
-import com.datatorrent.lib.logs.InformationExtractor;
 import java.text.DateFormat;
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
 import java.util.Date;
 import java.util.HashMap;
+import java.util.Map;
 import javax.validation.constraints.NotNull;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.lib.logs.InformationExtractor;
+
 /**
  * An implementation of InformationExtractor that extracts the timestamp, as milliseconds from epoch, from an arbitrary date string.
  * <p>
@@ -69,8 +69,7 @@
     try {
       Date date = dateFormat.parse((String)value);
       m.put("timestamp", date.getTime());
-    }
-    catch (ParseException ex) {
+    } catch (ParseException ex) {
       LOG.error("Error parsing \"{}\" to timestamp using \"{}\":", value, dateFormatString, ex);
     }
     return m;
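
Reviewer note: parse failures are logged rather than thrown, so one malformed date does not fail the window. A standalone-usage sketch; the format string mirrors the common apache %t layout and the dateFormatString setter is assumed:

```java
// Sketch: date string -> epoch milliseconds under the "timestamp" key.
TimestampExtractor ts = new TimestampExtractor();
ts.setDateFormatString("dd/MMM/yyyy:HH:mm:ss Z"); // assumed setter
ts.setup();
Map<String, Object> m = ts.extractInformation("12/Jun/2017:06:25:23 +0000");
// m.get("timestamp") -> milliseconds since epoch; absent if parsing failed
```
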
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/UserAgentExtractor.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/UserAgentExtractor.java
index f73ff3d..799eb48 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/UserAgentExtractor.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/UserAgentExtractor.java
@@ -16,20 +16,20 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.apachelog;
+package org.apache.apex.malhar.contrib.apachelog;
 
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
-import com.datatorrent.lib.logs.InformationExtractor;
+import org.apache.apex.malhar.lib.logs.InformationExtractor;
+
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
 
-import java.util.concurrent.TimeUnit;
-
 import net.sf.uadetector.ReadableUserAgent;
-import net.sf.uadetector.service.UADetectorServiceFactory;
 import net.sf.uadetector.UserAgentStringParser;
+import net.sf.uadetector.service.UADetectorServiceFactory;
 
 /**
  * An implementation of Information Extractor that extracts the browser and the OS from a user-agent string.
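
Reviewer note: the import reorder above is the whole change here, but the class is a nice illustration of memoizing an expensive parse with a Guava cache. The underlying pattern, independent of the operator plumbing (cache size and TTL are examples):

```java
// Sketch: cache user-agent parses; parser construction and parse() are costly.
UserAgentStringParser parser = UADetectorServiceFactory.getResourceModuleParser();
Cache<String, ReadableUserAgent> cache = CacheBuilder.newBuilder()
    .maximumSize(10000)
    .expireAfterWrite(1, TimeUnit.DAYS)
    .build();

String ua = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36";
ReadableUserAgent agent = cache.getIfPresent(ua);
if (agent == null) {
  agent = parser.parse(ua); // done once per distinct user-agent string
  cache.put(ua, agent);
}
// agent.getName() -> browser; agent.getOperatingSystem().getName() -> OS
```
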
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/package-info.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/package-info.java
index d99c7b5..67b151b 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/package-info.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/package-info.java
@@ -20,4 +20,4 @@
  * Apache log application, operators, and utilities.
  */
 @org.apache.hadoop.classification.InterfaceStability.Evolving
-package com.datatorrent.contrib.apachelog;
+package org.apache.apex.malhar.contrib.apachelog;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/zmq/Application.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/zmq/Application.java
index 738a146..e44c97a 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/zmq/Application.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/apachelog/zmq/Application.java
@@ -16,21 +16,21 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.apachelog.zmq;
+package org.apache.apex.malhar.contrib.apachelog.zmq;
 
 
+import org.apache.apex.malhar.contrib.zmq.SimpleSinglePortZeroMQPullStringInputOperator;
+import org.apache.apex.malhar.lib.algo.UniqueCounter;
+import org.apache.apex.malhar.lib.io.ConsoleOutputOperator;
+import org.apache.apex.malhar.lib.logs.ApacheLogParseOperator;
+import org.apache.apex.malhar.lib.math.Sum;
 import org.apache.hadoop.conf.Configuration;
 
 import com.datatorrent.api.Context.OperatorContext;
 import com.datatorrent.api.DAG;
 import com.datatorrent.api.DAG.Locality;
-import com.datatorrent.api.annotation.ApplicationAnnotation;
 import com.datatorrent.api.StreamingApplication;
-import com.datatorrent.contrib.zmq.SimpleSinglePortZeroMQPullStringInputOperator;
-import com.datatorrent.lib.algo.UniqueCounter;
-import com.datatorrent.lib.io.ConsoleOutputOperator;
-import com.datatorrent.lib.logs.ApacheLogParseOperator;
-import com.datatorrent.lib.math.Sum;
+import com.datatorrent.api.annotation.ApplicationAnnotation;
 
 /**
  * <p>An implementation of Streaming Application that populates the DAG. </p>
@@ -40,7 +40,7 @@
  * @tag: streaming
  * @since 0.3.2
  */
-@ApplicationAnnotation(name="ApacheLog")
+@ApplicationAnnotation(name = "ApacheLog")
 public class Application implements StreamingApplication
 {
   private Locality locality = null;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/avro/AvroFileInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/avro/AvroFileInputOperator.java
index f863d41..52200fe 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/avro/AvroFileInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/avro/AvroFileInputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.avro;
+package org.apache.apex.malhar.contrib.avro;
 
 import java.io.IOException;
 import java.io.InputStream;
@@ -24,6 +24,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.lib.io.fs.AbstractFileInputOperator;
 import org.apache.apex.malhar.lib.wal.FSWindowDataManager;
 import org.apache.avro.AvroRuntimeException;
 import org.apache.avro.file.DataFileStream;
@@ -37,7 +38,6 @@
 
 import com.datatorrent.api.AutoMetric;
 import com.datatorrent.api.DefaultOutputPort;
-import com.datatorrent.lib.io.fs.AbstractFileInputOperator;
 
 /**
  * <p>
diff --git a/contrib/src/main/java/com/datatorrent/contrib/avro/AvroFileToPojoModule.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/avro/AvroFileToPojoModule.java
similarity index 98%
rename from contrib/src/main/java/com/datatorrent/contrib/avro/AvroFileToPojoModule.java
rename to contrib/src/main/java/org/apache/apex/malhar/contrib/avro/AvroFileToPojoModule.java
index 8ad00df..4637b8f 100644
--- a/contrib/src/main/java/com/datatorrent/contrib/avro/AvroFileToPojoModule.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/avro/AvroFileToPojoModule.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.avro;
+package org.apache.apex.malhar.contrib.avro;
 
 import org.apache.apex.malhar.lib.wal.FSWindowDataManager;
 import org.apache.avro.generic.GenericRecord;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/avro/AvroRecordHelper.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/avro/AvroRecordHelper.java
index 012e977..176f5eb 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/avro/AvroRecordHelper.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/avro/AvroRecordHelper.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.avro;
+package org.apache.apex.malhar.contrib.avro;
 
 import java.text.ParseException;
 import java.util.List;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/avro/AvroToPojo.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/avro/AvroToPojo.java
index 2acf98c..5539562 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/avro/AvroToPojo.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/avro/AvroToPojo.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.avro;
+package org.apache.apex.malhar.contrib.avro;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -25,6 +25,9 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.lib.util.FieldInfo;
+import org.apache.apex.malhar.lib.util.FieldInfo.SupportType;
+import org.apache.apex.malhar.lib.util.PojoUtils;
 import org.apache.avro.AvroRuntimeException;
 import org.apache.avro.generic.GenericRecord;
 import org.apache.commons.lang3.ClassUtils;
@@ -40,9 +43,6 @@
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OutputPortFieldAnnotation;
 import com.datatorrent.common.util.BaseOperator;
-import com.datatorrent.lib.util.FieldInfo;
-import com.datatorrent.lib.util.FieldInfo.SupportType;
-import com.datatorrent.lib.util.PojoUtils;
 
 /**
  * <p>
@@ -87,7 +87,7 @@
   @AutoMetric
   @VisibleForTesting
   int fieldErrorCount = 0;
-  
+
   public final transient DefaultOutputPort<GenericRecord> errorPort = new DefaultOutputPort<GenericRecord>();
 
   /**
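
Reviewer note: the converter copies Avro columns into POJO fields through PojoUtils setters, counting per-field failures in fieldErrorCount and routing unconvertible records to errorPort. The mapping it consumes is a list of FieldInfo entries; a sketch with hypothetical column and field names:

```java
// Sketch: FieldInfo pairs an Avro column with a POJO field expression and type
// (org.apache.apex.malhar.lib.util.FieldInfo); names here are examples.
List<FieldInfo> fieldInfos = new ArrayList<>();
fieldInfos.add(new FieldInfo("orderId", "orderId", FieldInfo.SupportType.LONG));
fieldInfos.add(new FieldInfo("amount", "amount", FieldInfo.SupportType.DOUBLE));
```
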
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/avro/PojoToAvro.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/avro/PojoToAvro.java
index 41c56e3..17a4ecc 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/avro/PojoToAvro.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/avro/PojoToAvro.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.avro;
+package org.apache.apex.malhar.contrib.avro;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -25,6 +25,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.lib.util.PojoUtils;
+import org.apache.apex.malhar.lib.util.PojoUtils.Getter;
 import org.apache.avro.AvroRuntimeException;
 import org.apache.avro.Schema;
 import org.apache.avro.Schema.Field;
@@ -42,8 +44,6 @@
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.InputPortFieldAnnotation;
 import com.datatorrent.common.util.BaseOperator;
-import com.datatorrent.lib.util.PojoUtils;
-import com.datatorrent.lib.util.PojoUtils.Getter;
 
 /**
  * <p>
@@ -63,7 +63,6 @@
 @InterfaceStability.Evolving
 public class PojoToAvro extends BaseOperator
 {
-
   private List<Field> columnNames;
 
   private Class<?> cls;
@@ -155,7 +154,7 @@
    * @return Getter
    */
   private Getter<?, ?> generateGettersForField(Class<?> cls, String inputFieldName)
-      throws NoSuchFieldException, SecurityException
+    throws NoSuchFieldException, SecurityException
   {
     java.lang.reflect.Field f = cls.getDeclaredField(inputFieldName);
     Class<?> c = ClassUtils.primitiveToWrapper(f.getType());
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/AbstractCassandraInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/AbstractCassandraInputOperator.java
index 7bd47fc..3153387 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/AbstractCassandraInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/AbstractCassandraInputOperator.java
@@ -16,18 +16,18 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.cassandra;
+package org.apache.apex.malhar.contrib.cassandra;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.apex.malhar.lib.db.AbstractStoreInputOperator;
 
 import com.datastax.driver.core.PagingState;
 import com.datastax.driver.core.ResultSet;
 import com.datastax.driver.core.Row;
 import com.datastax.driver.core.SimpleStatement;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.datatorrent.lib.db.AbstractStoreInputOperator;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.netlet.util.DTThrowable;
 
@@ -43,8 +43,8 @@
  * @tags cassandra
  * @since 1.0.2
  */
-public abstract class AbstractCassandraInputOperator<T> extends AbstractStoreInputOperator<T, CassandraStore> {
-
+public abstract class AbstractCassandraInputOperator<T> extends AbstractStoreInputOperator<T, CassandraStore>
+{
   private static final Logger logger = LoggerFactory.getLogger(AbstractCassandraInputOperator.class);
   private PagingState nextPageState;
   private int fetchSize;
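
Reviewer note: nextPageState and fetchSize let the operator resume a paged CQL result set across emitTuples() calls. A minimal subclass sketch, assuming the usual String-query plus per-Row converter contract of this class; keyspace, table and column are examples:

```java
// Sketch: emit one String per row of a paged query; names are examples.
public class UserNameInputOperator extends AbstractCassandraInputOperator<String>
{
  @Override
  public String queryToRetrieveData()
  {
    return "select name from test.users"; // paged via PagingState/fetchSize
  }

  @Override
  public String getTuple(Row row) // com.datastax.driver.core.Row
  {
    return row.getString("name");
  }
}
```
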
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/AbstractCassandraTransactionableOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/AbstractCassandraTransactionableOutputOperator.java
index 9048383..f0f590d 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/AbstractCassandraTransactionableOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/AbstractCassandraTransactionableOutputOperator.java
@@ -16,12 +16,14 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.cassandra;
+package org.apache.apex.malhar.contrib.cassandra;
 
 import java.util.Collection;
 
 import javax.annotation.Nonnull;
 
+import org.apache.apex.malhar.lib.db.AbstractBatchTransactionableStoreOutputOperator;
+
 import com.datastax.driver.core.BatchStatement;
 import com.datastax.driver.core.PreparedStatement;
 import com.datastax.driver.core.Statement;
@@ -29,7 +31,6 @@
 import com.datatorrent.api.Context;
 import com.datatorrent.api.Context.OperatorContext;
 import com.datatorrent.api.Operator.ActivationListener;
-import com.datatorrent.lib.db.AbstractBatchTransactionableStoreOutputOperator;
 
 /**
  * <p>
@@ -86,8 +87,7 @@
   public void processBatch(Collection<T> tuples)
   {
     BatchStatement batchCommand = store.getBatchCommand();
-    for(T tuple: tuples)
-    {
+    for (T tuple: tuples) {
       batchCommand.add(setStatementParameters(updateCommand, tuple));
     }
   }
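
Reviewer note: processBatch() above implies the two methods a subclass supplies: a prepared statement and a per-tuple binding that is added to the window's BatchStatement. A sketch under those assumed signatures; User is a hypothetical POJO and keyspace/table are examples:

```java
// Sketch: prepared INSERT plus per-tuple binding; all names are examples.
public class UserUpsertOperator extends AbstractCassandraTransactionableOutputOperator<User>
{
  @Override
  protected PreparedStatement getUpdateCommand()
  {
    return store.getSession().prepare("INSERT INTO test.users (id, name) VALUES (?, ?)");
  }

  @Override
  protected Statement setStatementParameters(PreparedStatement updateCommand, User tuple) throws DriverException
  {
    return updateCommand.bind(tuple.getId(), tuple.getName()); // a BoundStatement
  }
}
```
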
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/AbstractUpsertOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/AbstractUpsertOutputOperator.java
index f287385..cadf157 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/AbstractUpsertOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/AbstractUpsertOutputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.cassandra;
+package org.apache.apex.malhar.contrib.cassandra;
 
 import java.io.IOException;
 import java.io.InputStream;
@@ -949,7 +949,7 @@
 
   /**
    * Implementing concrete Operator instances define the Connection Builder properties by implementing this method
-   * Please refer to {@link com.datatorrent.contrib.cassandra.ConnectionStateManager.ConnectionBuilder} for
+   * Please refer to {@link org.apache.apex.malhar.contrib.cassandra.ConnectionStateManager.ConnectionBuilder} for
    * an example implementation of the ConnectionStateManager instantiation.
    * Note that if this method is returning null, the connection properties are
    * fetched from a properties file loaded from the classpath.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraPOJOInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraPOJOInputOperator.java
index f43777f..cab83e8 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraPOJOInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraPOJOInputOperator.java
@@ -16,17 +16,30 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.cassandra;
+package org.apache.apex.malhar.contrib.cassandra;
 
 import java.math.BigDecimal;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
 
 import javax.validation.constraints.Min;
 import javax.validation.constraints.NotNull;
 
-import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.apache.apex.malhar.lib.util.FieldInfo;
+import org.apache.apex.malhar.lib.util.PojoUtils;
+import org.apache.apex.malhar.lib.util.PojoUtils.Setter;
+import org.apache.apex.malhar.lib.util.PojoUtils.SetterBoolean;
+import org.apache.apex.malhar.lib.util.PojoUtils.SetterDouble;
+import org.apache.apex.malhar.lib.util.PojoUtils.SetterFloat;
+import org.apache.apex.malhar.lib.util.PojoUtils.SetterInt;
+import org.apache.apex.malhar.lib.util.PojoUtils.SetterLong;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
 
 import com.datastax.driver.core.BoundStatement;
 import com.datastax.driver.core.ColumnDefinitions;
@@ -39,9 +52,6 @@
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.Operator;
 import com.datatorrent.api.annotation.OutputPortFieldAnnotation;
-import com.datatorrent.lib.util.FieldInfo;
-import com.datatorrent.lib.util.PojoUtils;
-import com.datatorrent.lib.util.PojoUtils.*;
 
 /**
  * <p>
@@ -287,8 +297,7 @@
     try {
       // This code will be replaced after integration of creating POJOs on the fly utility.
       obj = pojoClass.newInstance();
-    }
-    catch (InstantiationException | IllegalAccessException ex) {
+    } catch (InstantiationException | IllegalAccessException ex) {
       throw new RuntimeException(ex);
     }
 
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraPOJOOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraPOJOOutputOperator.java
index a191bb0..5f7d52a 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraPOJOOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraPOJOOutputOperator.java
@@ -16,28 +16,44 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.cassandra;
+package org.apache.apex.malhar.contrib.cassandra;
 
 import java.lang.reflect.Field;
 import java.math.BigDecimal;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
 
-import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.collect.Lists;
-import com.datastax.driver.core.*;
+import org.apache.apex.malhar.lib.util.FieldInfo;
+import org.apache.apex.malhar.lib.util.PojoUtils;
+import org.apache.apex.malhar.lib.util.PojoUtils.Getter;
+import org.apache.apex.malhar.lib.util.PojoUtils.GetterBoolean;
+import org.apache.apex.malhar.lib.util.PojoUtils.GetterDouble;
+import org.apache.apex.malhar.lib.util.PojoUtils.GetterFloat;
+import org.apache.apex.malhar.lib.util.PojoUtils.GetterInt;
+import org.apache.apex.malhar.lib.util.PojoUtils.GetterLong;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+
+import com.datastax.driver.core.BoundStatement;
+import com.datastax.driver.core.ColumnDefinitions;
+import com.datastax.driver.core.DataType;
+import com.datastax.driver.core.LocalDate;
+import com.datastax.driver.core.PreparedStatement;
+import com.datastax.driver.core.Statement;
 import com.datastax.driver.core.exceptions.DriverException;
+import com.google.common.collect.Lists;
 import com.datatorrent.api.AutoMetric;
 import com.datatorrent.api.Context;
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.InputPortFieldAnnotation;
 import com.datatorrent.api.annotation.OutputPortFieldAnnotation;
-import com.datatorrent.lib.util.FieldInfo;
-import com.datatorrent.lib.util.PojoUtils;
-import com.datatorrent.lib.util.PojoUtils.*;
 
 /**
  * <p>
@@ -106,10 +122,11 @@
   @Override
   public void activate(Context.OperatorContext context)
   {
-    com.datastax.driver.core.ResultSet rs = store.getSession().execute("select * from " + store.keyspace + "." + tablename);
+    com.datastax.driver.core.ResultSet rs
+        = store.getSession().execute("select * from " + store.keyspace + "." + tablename);
     final ColumnDefinitions rsMetaData = rs.getColumnDefinitions();
 
-    if(fieldInfos == null) {
+    if (fieldInfos == null) {
       populateFieldInfosFromPojo(rsMetaData);
     }
 
@@ -228,13 +245,12 @@
       }
     }
     String statement
-            = "INSERT INTO " + store.keyspace + "."
-            + tablename
-            + " (" + queryfields.toString() + ") "
-            + "VALUES (" + values.toString() + ");";
+        = "INSERT INTO " + store.keyspace + "."
+        + tablename
+        + " (" + queryfields.toString() + ") "
+        + "VALUES (" + values.toString() + ");";
     LOG.debug("statement is {}", statement);
     return store.getSession().prepare(statement);
-
   }
 
   @Override
@@ -316,6 +332,7 @@
       errorRecords++;
     }
   }
+
   /**
    * A list of {@link FieldInfo}s where each item maps a column name to a pojo field name.
    */
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraPojoUtils.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraPojoUtils.java
index 8b435c9..179ac1a 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraPojoUtils.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraPojoUtils.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.cassandra;
+package org.apache.apex.malhar.contrib.cassandra;
 
 import java.math.BigDecimal;
 import java.util.Date;
@@ -25,11 +25,12 @@
 import java.util.Set;
 import java.util.UUID;
 
+import org.apache.apex.malhar.lib.util.PojoUtils;
+
 import com.datastax.driver.core.BoundStatement;
 import com.datastax.driver.core.DataType;
 import com.datastax.driver.core.LocalDate;
 import com.datastax.driver.core.TypeCodec;
-import com.datatorrent.lib.util.PojoUtils;
 
 /**
  * Used to manage simple data type based getters for given cassandra columns
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraPreparedStatementGenerator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraPreparedStatementGenerator.java
index 713179c..a797bab 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraPreparedStatementGenerator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraPreparedStatementGenerator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.cassandra;
+package org.apache.apex.malhar.contrib.cassandra;
 
 
 import java.util.EnumSet;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraStore.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraStore.java
index 5d9178c..0c298aa 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraStore.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraStore.java
@@ -16,19 +16,20 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.cassandra;
+package org.apache.apex.malhar.contrib.cassandra;
 
 import javax.validation.constraints.NotNull;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.lib.db.Connectable;
+
 import com.datastax.driver.core.Cluster;
 import com.datastax.driver.core.ProtocolVersion;
 import com.datastax.driver.core.Session;
 import com.datastax.driver.core.exceptions.DriverException;
 import com.datatorrent.netlet.util.DTThrowable;
-import com.datatorrent.lib.db.Connectable;
 
 /**
  * A {@link Connectable} that uses cassandra to connect to stores and implements the Connectable interface.
@@ -67,7 +68,8 @@
    *
    * @param keyspace keyspace.
    */
-  public void setKeyspace(String keyspace) {
+  public void setKeyspace(String keyspace)
+  {
     this.keyspace = keyspace;
   }
 
@@ -106,7 +108,8 @@
   }
 
   @NotNull
-  public String getNode() {
+  public String getNode()
+  {
     return node;
   }
 
@@ -115,15 +118,18 @@
    *
    * @param node node
    */
-  public void setNode(@NotNull String node) {
+  public void setNode(@NotNull String node)
+  {
     this.node = node;
   }
 
-  public Cluster getCluster() {
+  public Cluster getCluster()
+  {
     return cluster;
   }
 
-  public Session getSession() {
+  public Session getSession()
+  {
     return session;
   }
 
@@ -167,20 +173,18 @@
   public void connect()
   {
     try {
-      if(cluster==null)
+      if (cluster == null) {
         buildCluster();
+      }
       session = cluster.connect();
       logger.debug("Cassandra connection Success");
-    }
-    catch (DriverException ex) {
+    } catch (DriverException ex) {
       throw new RuntimeException("closing database resource", ex);
-    }
-    catch (Throwable t) {
+    } catch (Throwable t) {
       DTThrowable.rethrow(t);
     }
   }
 
-
   /**
    * Close connection.
    */
@@ -190,11 +194,9 @@
     try {
       session.close();
       cluster.close();
-    }
-    catch (DriverException ex) {
+    } catch (DriverException ex) {
       throw new RuntimeException("closing database resource", ex);
-    }
-    catch (Throwable t) {
+    } catch (Throwable t) {
       DTThrowable.rethrow(t);
     }
   }
@@ -204,8 +206,7 @@
   {
     try {
       return !session.isClosed();
-    }
-    catch (DriverException ex) {
+    } catch (DriverException ex) {
       throw new RuntimeException("closing database resource", ex);
     }
   }
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraTransactionalStore.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraTransactionalStore.java
index 4c5a71e..29b6ce9 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraTransactionalStore.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/CassandraTransactionalStore.java
@@ -16,14 +16,18 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.cassandra;
+package org.apache.apex.malhar.contrib.cassandra;
 
 import javax.annotation.Nonnull;
 
-import com.datastax.driver.core.*;
-import com.datastax.driver.core.exceptions.DriverException;
+import org.apache.apex.malhar.lib.db.TransactionableStore;
 
-import com.datatorrent.lib.db.TransactionableStore;
+import com.datastax.driver.core.BatchStatement;
+import com.datastax.driver.core.BoundStatement;
+import com.datastax.driver.core.PreparedStatement;
+import com.datastax.driver.core.ResultSet;
+import com.datastax.driver.core.Statement;
+import com.datastax.driver.core.exceptions.DriverException;
 
 /**
  * <p>Provides transaction support to the operators by implementing TransactionableStore abstract methods. </p>
@@ -34,8 +38,8 @@
  * @tags cassandra, transactional
  * @since 1.0.2
  */
-public class CassandraTransactionalStore extends CassandraStore implements TransactionableStore {
-
+public class CassandraTransactionalStore extends CassandraStore implements TransactionableStore
+{
   public static String DEFAULT_APP_ID_COL = "dt_app_id";
   public static String DEFAULT_OPERATOR_ID_COL = "dt_operator_id";
   public static String DEFAULT_WINDOW_COL = "dt_window";
@@ -116,7 +120,8 @@
     this.metaTableWindowColumn = windowColumn;
   }
 
-  public Statement getLastWindowUpdateStatement() {
+  public Statement getLastWindowUpdateStatement()
+  {
     return lastWindowUpdateStatement;
   }
 
@@ -130,26 +135,24 @@
   {
     super.connect();
     try {
-      String command = "SELECT " + metaTableWindowColumn + " FROM " + keyspace +"."+ metaTable + " WHERE " + metaTableAppIdColumn +
+      String command = "SELECT " + metaTableWindowColumn + " FROM " + keyspace + "." + metaTable +
+          " WHERE " + metaTableAppIdColumn +
           " = ? AND " + metaTableOperatorIdColumn + " = ?";
       logger.debug(command);
       lastWindowFetchCommand = session.prepare(command);
 
-      command = "UPDATE " + keyspace +"."+ metaTable + " SET " + metaTableWindowColumn + " = ? where " + metaTableAppIdColumn + " = ? " +
+      command = "UPDATE " + keyspace + "." + metaTable + " SET " + metaTableWindowColumn + " = ? where " + metaTableAppIdColumn + " = ? " +
           " and " + metaTableOperatorIdColumn + " = ?";
       logger.debug(command);
       lastWindowUpdateCommand = session.prepare(command);
 
-      command = "DELETE FROM " + keyspace +"."+ metaTable + " where " + metaTableAppIdColumn + " = ? and " +
+      command = "DELETE FROM " + keyspace + "." + metaTable + " where " + metaTableAppIdColumn + " = ? and " +
           metaTableOperatorIdColumn + " = ?";
       logger.debug(command);
       lastWindowDeleteCommand = session.prepare(command);
-    }
-    catch (DriverException e) {
+    } catch (DriverException e) {
       throw new RuntimeException(e);
-    }
-    catch (Exception e)
-    {
+    } catch (Exception e) {
       throw new RuntimeException(e);
     }
   }
@@ -160,8 +163,7 @@
     if (lastWindowUpdateCommand != null) {
       try {
         lastWindowUpdateCommand.disableTracing();
-      }
-      catch (DriverException e) {
+      } catch (DriverException e) {
         throw new RuntimeException(e);
       }
     }
@@ -208,8 +210,7 @@
       }
       lastWindowFetchCommand.disableTracing();
       return lastWindow;
-    }
-    catch (DriverException ex) {
+    } catch (DriverException ex) {
       throw new RuntimeException(ex);
     }
   }
@@ -221,8 +222,7 @@
       BoundStatement boundStatement = new BoundStatement(lastWindowUpdateCommand);
       lastWindowUpdateStatement = boundStatement.bind(windowId,appId,operatorId);
       batchCommand.add(lastWindowUpdateStatement);
-    }
-    catch (DriverException e) {
+    } catch (DriverException e) {
       throw new RuntimeException(e);
     }
   }
@@ -235,8 +235,7 @@
       lastWindowDeleteStatement = boundStatement.bind(appId,operatorId);
       session.execute(lastWindowDeleteStatement);
       lastWindowDeleteCommand.disableTracing();
-    }
-    catch (DriverException e) {
+    } catch (DriverException e) {
       throw new RuntimeException(e);
     }
   }
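
Reviewer note: for anyone new to this store, below is a minimal, self-contained sketch of the committed-window round trip that the three prepared statements above implement. The contact point, keyspace, and meta-table name (`test.dt_meta`) are placeholders, not part of this patch.

```java
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;

public class MetaTableSketch
{
  public static void main(String[] args)
  {
    // contact point, keyspace and meta-table name are placeholders
    try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
        Session session = cluster.connect()) {
      PreparedStatement fetch = session.prepare(
          "SELECT dt_window FROM test.dt_meta WHERE dt_app_id = ? AND dt_operator_id = ?");
      PreparedStatement update = session.prepare(
          "UPDATE test.dt_meta SET dt_window = ? where dt_app_id = ? and dt_operator_id = ?");
      // storeCommittedWindowId path: bind and execute the update
      session.execute(update.bind(42L, "app-1", 1));
      // getCommittedWindowId path: fetch the stored window id back
      Row row = session.execute(fetch.bind("app-1", 1)).one();
      long lastWindow = row == null ? -1L : row.getLong(0);
      System.out.println("committed window = " + lastWindow);
    }
  }
}
```
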
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/ConnectionStateManager.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/ConnectionStateManager.java
index f089137..0eccb22 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/ConnectionStateManager.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/ConnectionStateManager.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.cassandra;
+package org.apache.apex.malhar.contrib.cassandra;
 
 import java.io.Serializable;
 import java.net.InetAddress;
@@ -64,7 +64,7 @@
  * </p>
  * <p> A typical implementation of the ConnectionBuilder would like this:
  * <pre>
- *     
+ *
  *     public ConnectionStateManager.ConnectionBuilder withConnectionBuilder()
  *     {
  *       return ConnectionStateManager.withNewBuilder()
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/UpsertExecutionContext.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/UpsertExecutionContext.java
index 92be546..10ffb08 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/UpsertExecutionContext.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/UpsertExecutionContext.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.cassandra;
+package org.apache.apex.malhar.contrib.cassandra;
 
 import com.datastax.driver.core.ConsistencyLevel;
 
@@ -122,7 +122,7 @@
 
   /**
    * This decides if we want to override the default TTL if at all set in the
-   * {@link com.datatorrent.contrib.cassandra.ConnectionStateManager.ConnectionBuilder} that is used to execute a
+   * {@link org.apache.apex.malhar.contrib.cassandra.ConnectionStateManager.ConnectionBuilder} that is used to execute a
    * mutation. Note that TTLs are not mandatory for mutations.
    * Also it is supported to have TTLs only for the current execution context but not set a default at the
    * connection state manager level
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/package-info.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/package-info.java
index 1baae5e..93c5518 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/package-info.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/cassandra/package-info.java
@@ -17,4 +17,4 @@
  * under the License.
  */
 @org.apache.hadoop.classification.InterfaceStability.Evolving
-package com.datatorrent.contrib.cassandra;
+package org.apache.apex.malhar.contrib.cassandra;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/AbstractCouchBaseInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/AbstractCouchBaseInputOperator.java
index 1cd4eb5..3c53a49 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/AbstractCouchBaseInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/AbstractCouchBaseInputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.couchbase;
+package org.apache.apex.malhar.contrib.couchbase;
 
 import java.io.IOException;
 import java.util.Collection;
@@ -24,21 +24,20 @@
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.apex.malhar.lib.db.AbstractStoreInputOperator;
+import org.apache.apex.malhar.lib.util.KryoCloneUtils;
+
 import com.couchbase.client.CouchbaseClient;
 import com.couchbase.client.vbucket.config.Config;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.datatorrent.lib.db.AbstractStoreInputOperator;
-
 import com.datatorrent.api.Context;
 import com.datatorrent.api.DefaultPartition;
 import com.datatorrent.api.Partitioner;
-
-import com.datatorrent.lib.util.KryoCloneUtils;
 import com.datatorrent.netlet.util.DTThrowable;
 
 /**
@@ -94,8 +93,7 @@
       }
       try {
         clientPartition = store.connectServer(serverURIString);
-      }
-      catch (IOException ex) {
+      } catch (IOException ex) {
         DTThrowable.rethrow(ex);
       }
     }
@@ -116,16 +114,16 @@
     List<String> keys = getKeys();
     Object result = null;
     for (String key: keys) {
-        int master = conf.getMaster(conf.getVbucketByKey(key));
-        if (master == getServerIndex()) {
-          result = clientPartition.get(key);
-        }
+      int master = conf.getMaster(conf.getVbucketByKey(key));
+      if (master == getServerIndex()) {
+        result = clientPartition.get(key);
       }
+    }
 
-      if (result != null) {
-        T tuple = getTuple(result);
-        outputPort.emit(tuple);
-      }
+    if (result != null) {
+      T tuple = getTuple(result);
+      outputPort.emit(tuple);
+    }
   }
 
   public abstract T getTuple(Object object);
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/AbstractCouchBaseOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/AbstractCouchBaseOutputOperator.java
index ef3be01..8cc92f2 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/AbstractCouchBaseOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/AbstractCouchBaseOutputOperator.java
@@ -16,23 +16,23 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.couchbase;
+package org.apache.apex.malhar.contrib.couchbase;
 
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.TreeMap;
-import net.spy.memcached.internal.OperationCompletionListener;
-import net.spy.memcached.internal.OperationFuture;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.datatorrent.lib.db.AbstractAggregateTransactionableStoreOutputOperator;
+import org.apache.apex.malhar.lib.db.AbstractAggregateTransactionableStoreOutputOperator;
 
 import com.datatorrent.api.Context.OperatorContext;
-
 import com.datatorrent.netlet.util.DTThrowable;
 
+import net.spy.memcached.internal.OperationCompletionListener;
+import net.spy.memcached.internal.OperationFuture;
+
 /**
  * AbstractCouchBaseOutputOperator which extends Transactionable Store Output Operator.
  * Classes extending from this operator should implement the abstract functionality of generateKey, getValue and insertOrUpdate.
@@ -108,10 +109,10 @@
     id++;
     String key = getKey(tuple);
     Object value = getValue(tuple);
-    if(!(value instanceof Boolean) && !(value instanceof Integer) && !(value instanceof String) && !(value instanceof Float) && !(value instanceof Double) && !(value instanceof Character) && !(value instanceof Long) && !(value instanceof Short) && !(value instanceof Byte)){
-    if (serializer != null) {
-      value = serializer.serialize(value);
-    }
+    if (!(value instanceof Boolean) && !(value instanceof Integer) && !(value instanceof String) && !(value instanceof Float) && !(value instanceof Double) && !(value instanceof Character) && !(value instanceof Long) && !(value instanceof Short) && !(value instanceof Byte)) {
+      if (serializer != null) {
+        value = serializer.serialize(value);
+      }
     }
     OperationFuture<Boolean> future = processKeyValue(key, value);
     synchronized (syncObj) {
@@ -135,7 +136,7 @@
   public void waitForQueueSize(int sizeOfQueue)
   {
     long startTms = System.currentTimeMillis();
-    long elapsedTime ;
+    long elapsedTime;
     while (numTuples > sizeOfQueue) {
       synchronized (syncObj) {
         if (numTuples > sizeOfQueue) {
@@ -143,12 +144,10 @@
             elapsedTime = System.currentTimeMillis() - startTms;
             if (elapsedTime >= store.timeout) {
               throw new RuntimeException("Timed out waiting for space in queue");
-            }
-            else {
+            } else {
               syncObj.wait(store.timeout - elapsedTime);
             }
-          }
-          catch (InterruptedException ex) {
+          } catch (InterruptedException ex) {
             DTThrowable.rethrow(ex);
           }
         }
@@ -167,7 +166,7 @@
     public void onComplete(OperationFuture<?> f) throws Exception
     {
       if (!((Boolean)f.get())) {
-        logger.error("Operation failed {}" , f);
+        logger.error("Operation failed {}", f);
         failure = true;
         return;
       }
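
Reviewer note: the `waitForQueueSize()` hunk above is the trickiest part of this file, so here is a stripped-down sketch of the same bounded-wait pattern: a caller blocks on a monitor until the async completion callback drains the counter, and gives up after a timeout. Field names mirror the operator; the class itself is illustrative only.

```java
public class BoundedWaitSketch
{
  private final Object syncObj = new Object();
  private int numTuples;
  private final long timeout = 10000; // ms, mirrors store.timeout above

  // Block until the in-flight count drains below sizeOfQueue, or time out.
  public void waitForQueueSize(int sizeOfQueue) throws InterruptedException
  {
    long startTms = System.currentTimeMillis();
    synchronized (syncObj) {
      while (numTuples > sizeOfQueue) {
        long elapsedTime = System.currentTimeMillis() - startTms;
        if (elapsedTime >= timeout) {
          throw new RuntimeException("Timed out waiting for space in queue");
        }
        syncObj.wait(timeout - elapsedTime);
      }
    }
  }

  // The async completion listener decrements the count and wakes the waiter.
  public void onComplete()
  {
    synchronized (syncObj) {
      numTuples--;
      syncObj.notifyAll();
    }
  }
}
```
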
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/AbstractCouchBaseSetOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/AbstractCouchBaseSetOperator.java
index f33d804..0abe06f 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/AbstractCouchBaseSetOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/AbstractCouchBaseSetOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.couchbase;
+package org.apache.apex.malhar.contrib.couchbase;
 
 import net.spy.memcached.internal.OperationFuture;
 
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchBaseJSONSerializer.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchBaseJSONSerializer.java
index 188d989..0f968bd 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchBaseJSONSerializer.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchBaseJSONSerializer.java
@@ -16,11 +16,11 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.couchbase;
+package org.apache.apex.malhar.contrib.couchbase;
 
-import com.datatorrent.netlet.util.DTThrowable;
 import java.io.IOException;
 import org.codehaus.jackson.map.ObjectMapper;
+import com.datatorrent.netlet.util.DTThrowable;
 
 /**
  * <p>CouchBaseJSONSerializer</p>
@@ -43,8 +43,7 @@
     String value = null;
     try {
       value = mapper.writeValueAsString(o);
-    }
-    catch (IOException ex) {
+    } catch (IOException ex) {
       DTThrowable.rethrow(ex);
     }
     return value;
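
Reviewer note: for context, the `serialize()` method above is a thin wrapper over Jackson 1.x. A standalone sketch of the same call (the sample map contents are made up):

```java
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.codehaus.jackson.map.ObjectMapper;

public class JsonSerializeSketch
{
  public static void main(String[] args) throws IOException
  {
    ObjectMapper mapper = new ObjectMapper();
    Map<String, Object> doc = new HashMap<>();
    doc.put("id", 1);
    doc.put("name", "couch");
    // same call CouchBaseJSONSerializer.serialize() wraps with DTThrowable.rethrow()
    String json = mapper.writeValueAsString(doc);
    System.out.println(json); // e.g. {"id":1,"name":"couch"}
  }
}
```
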
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchBasePOJOInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchBasePOJOInputOperator.java
index 2378aec..91e3b17 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchBasePOJOInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchBasePOJOInputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.couchbase;
+package org.apache.apex.malhar.contrib.couchbase;
 
 import java.io.IOException;
 import java.util.Iterator;
@@ -24,12 +24,16 @@
 import javax.validation.constraints.Min;
 import javax.validation.constraints.NotNull;
 
-import com.couchbase.client.protocol.views.*;
-
-import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.codehaus.jackson.map.ObjectMapper;
+import org.apache.apex.malhar.lib.db.AbstractStoreInputOperator;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
 
-import com.datatorrent.lib.db.AbstractStoreInputOperator;
+import com.couchbase.client.protocol.views.Query;
+import com.couchbase.client.protocol.views.Stale;
+import com.couchbase.client.protocol.views.View;
+import com.couchbase.client.protocol.views.ViewResponse;
+import com.couchbase.client.protocol.views.ViewRow;
+
 import com.datatorrent.api.Context.OperatorContext;
 
 /**
@@ -167,8 +171,7 @@
     super.setup(context);
     try {
       className = Class.forName(outputClass);
-    }
-    catch (ClassNotFoundException ex) {
+    } catch (ClassNotFoundException ex) {
       throw new RuntimeException(ex);
     }
     view = store.getInstance().getView(designDocumentName, viewName);
@@ -198,8 +201,7 @@
       Object outputObj = null;
       try {
         outputObj = objectMapper.readValue(document.toString(), className);
-      }
-      catch (IOException ex) {
+      } catch (IOException ex) {
         throw new RuntimeException(ex);
       }
       outputPort.emit(outputObj);
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchBaseSerializer.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchBaseSerializer.java
index f15758e..0769e82 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchBaseSerializer.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchBaseSerializer.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.couchbase;
+package org.apache.apex.malhar.contrib.couchbase;
 
 /**
  *
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchBaseStore.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchBaseStore.java
index 238047e..fb5ba1d 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchBaseStore.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchBaseStore.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.couchbase;
+package org.apache.apex.malhar.contrib.couchbase;
 
 import java.io.IOException;
 import java.net.URI;
@@ -28,6 +28,10 @@
 import javax.annotation.Nonnull;
 import javax.validation.constraints.Min;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.apex.malhar.lib.db.Connectable;
+
 import com.couchbase.client.CouchbaseClient;
 import com.couchbase.client.CouchbaseConnectionFactoryBuilder;
 import com.couchbase.client.vbucket.ConfigurationProvider;
@@ -35,11 +39,6 @@
 import com.couchbase.client.vbucket.config.Bucket;
 import com.couchbase.client.vbucket.config.Config;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.datatorrent.lib.db.Connectable;
-
 import com.datatorrent.netlet.util.DTThrowable;
 
 /**
@@ -108,6 +107,7 @@
   {
     this.queueSize = queueSize;
   }
+
   protected Integer maxTuples = 1000;
   protected int blockTime = 1000;
   protected long timeout = 10000;
@@ -192,8 +192,7 @@
   {
     try {
       connect();
-    }
-    catch (IOException ex) {
+    } catch (IOException ex) {
       DTThrowable.rethrow(ex);
     }
     ConfigurationProvider configurationProvider = new ConfigurationProviderHTTP(baseURIs, userConfig, passwordConfig);
@@ -201,8 +200,7 @@
     Config conf = configBucket.getConfig();
     try {
       disconnect();
-    }
-    catch (IOException ex) {
+    } catch (IOException ex) {
       DTThrowable.rethrow(ex);
     }
     return conf;
@@ -216,8 +214,7 @@
     for (String url : tokens) {
       try {
         uri = new URI("http", url, "/pools", null, null);
-      }
-      catch (URISyntaxException ex) {
+      } catch (URISyntaxException ex) {
         DTThrowable.rethrow(ex);
       }
       baseURIs.add(uri);
@@ -228,9 +225,8 @@
       cfb.setOpQueueMaxBlockTime(blockTime); // wait up to 10 second when trying to enqueue an operation
       client = new CouchbaseClient(cfb.buildCouchbaseConnection(baseURIs, bucket, password));
       //client = new CouchbaseClient(baseURIs, "default", "");
-    }
-    catch (IOException e) {
-      logger.error("Error connecting to Couchbase:" , e);
+    } catch (IOException e) {
+      logger.error("Error connecting to Couchbase:", e);
       DTThrowable.rethrow(e);
     }
   }
@@ -241,15 +237,13 @@
     CouchbaseClient clientPartition = null;
     try {
       nodes.add(new URI("http",urlString,"/pools", null, null));
-    }
-    catch (URISyntaxException ex) {
+    } catch (URISyntaxException ex) {
       DTThrowable.rethrow(ex);
     }
     try {
       clientPartition = new CouchbaseClient(nodes, bucket, password);
-    }
-    catch (IOException e) {
-     logger.error("Error connecting to Couchbase:" , e);
+    } catch (IOException e) {
+      logger.error("Error connecting to Couchbase:", e);
       DTThrowable.rethrow(e);
     }
     return clientPartition;
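
Reviewer note: the `new URI("http", url, "/pools", null, null)` calls reformatted above use the authority form of the `URI` constructor to build one base URI per configured node. A runnable sketch with placeholder host names:

```java
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.List;

public class UriListSketch
{
  public static void main(String[] args) throws URISyntaxException
  {
    // placeholder node list; the operator gets this from its configured uri string
    String uriString = "localhost:8091,otherhost:8091";
    List<URI> baseURIs = new ArrayList<>();
    for (String url : uriString.split(",")) {
      // authority form of the URI constructor, as in CouchBaseStore.connect()
      baseURIs.add(new URI("http", url, "/pools", null, null));
    }
    System.out.println(baseURIs); // [http://localhost:8091/pools, http://otherhost:8091/pools]
  }
}
```
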
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchBaseWindowStore.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchBaseWindowStore.java
index 0c57ca6..2f0ce59 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchBaseWindowStore.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchBaseWindowStore.java
@@ -16,19 +16,23 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.couchbase;
+package org.apache.apex.malhar.contrib.couchbase;
 
-import java.io.*;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 
-import com.couchbase.client.CouchbaseClient;
-import com.couchbase.client.CouchbaseConnectionFactoryBuilder;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.datatorrent.lib.db.TransactionableStore;
+import org.apache.apex.malhar.lib.db.TransactionableStore;
+
+import com.couchbase.client.CouchbaseClient;
+import com.couchbase.client.CouchbaseConnectionFactoryBuilder;
 
 import com.datatorrent.netlet.util.DTThrowable;
 
@@ -92,9 +96,8 @@
       cfb.setOpTimeout(timeout);  // wait up to 10 seconds for an operation to succeed
       cfb.setOpQueueMaxBlockTime(blockTime); // wait up to 10 second when trying to enqueue an operation
       clientMeta = new CouchbaseClient(cfb.buildCouchbaseConnection(baseURIs, bucketMeta, passwordMeta));
-    }
-    catch (IOException e) {
-      logger.error("Error connecting to Couchbase: " , e);
+    } catch (IOException e) {
+      logger.error("Error connecting to Couchbase: ", e);
       DTThrowable.rethrow(e);
     }
   }
@@ -119,11 +122,9 @@
     String key = appId + "_" + operatorId + "_" + lastWindowValue;
     try {
       clientMeta.set(key, WindowIdBytes).get();
-    }
-    catch (InterruptedException ex) {
+    } catch (InterruptedException ex) {
       DTThrowable.rethrow(ex);
-    }
-    catch (ExecutionException ex) {
+    } catch (ExecutionException ex) {
       DTThrowable.rethrow(ex);
     }
 
@@ -163,8 +164,7 @@
     try {
       result = dos.readLong();
       dos.close();
-    }
-    catch (IOException e) {
+    } catch (IOException e) {
       DTThrowable.rethrow(e);
     }
     return result;
@@ -179,8 +179,7 @@
       dos.writeLong(l);
       result = baos.toByteArray();
       dos.close();
-    }
-    catch (IOException e) {
+    } catch (IOException e) {
       DTThrowable.rethrow(e);
     }
     return result;
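
Reviewer note: the last two hunks are the `long` <-> `byte[]` helpers used for window ids. A standalone round-trip sketch is below; while touching this file it may also be worth renaming the `DataInputStream` that is still called `dos` in the read path, though that is out of scope for an imports/checkstyle pass.

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class LongBytesSketch
{
  static byte[] toBytes(long l) throws IOException
  {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(baos);
    dos.writeLong(l);
    dos.close();
    return baos.toByteArray();
  }

  static long toLong(byte[] b) throws IOException
  {
    DataInputStream dis = new DataInputStream(new ByteArrayInputStream(b));
    long result = dis.readLong();
    dis.close();
    return result;
  }

  public static void main(String[] args) throws IOException
  {
    System.out.println(toLong(toBytes(123456789L))); // 123456789
  }
}
```
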
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchbasePOJOSetOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchbasePOJOSetOperator.java
index 5221b02..64ce9e2 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchbasePOJOSetOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/CouchbasePOJOSetOperator.java
@@ -16,18 +16,18 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.couchbase;
+package org.apache.apex.malhar.contrib.couchbase;
 
 import java.util.ArrayList;
 
 import javax.validation.constraints.NotNull;
 
-import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.datatorrent.lib.util.PojoUtils;
-import com.datatorrent.lib.util.PojoUtils.Getter;
+import org.apache.apex.malhar.lib.util.PojoUtils;
+import org.apache.apex.malhar.lib.util.PojoUtils.Getter;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
 
 /**
  * An implementation of Couchbase Output Operator which takes a POJO,serializes it into key,value
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/package-info.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/package-info.java
index 1a6d05b..7b7dc59 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/package-info.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchbase/package-info.java
@@ -17,4 +17,4 @@
  * under the License.
  */
 @org.apache.hadoop.classification.InterfaceStability.Evolving
-package com.datatorrent.contrib.couchbase;
+package org.apache.apex.malhar.contrib.couchbase;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/AbstractCouchDBInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/AbstractCouchDBInputOperator.java
index 1935e5c..ad3e797 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/AbstractCouchDBInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/AbstractCouchDBInputOperator.java
@@ -16,20 +16,19 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.couchdb;
+package org.apache.apex.malhar.contrib.couchdb;
 
 import java.io.IOException;
 import java.util.List;
 
 import javax.validation.constraints.Min;
 
-import com.google.common.base.Throwables;
-
 import org.ektorp.ViewQuery;
 import org.ektorp.ViewResult;
 
-import com.datatorrent.lib.db.AbstractStoreInputOperator;
+import org.apache.apex.malhar.lib.db.AbstractStoreInputOperator;
 
+import com.google.common.base.Throwables;
 import com.datatorrent.api.Context;
 
 
@@ -92,8 +91,7 @@
         T tuple = getTuple(row);
         outputPort.emit(tuple);
       }
-    }
-    catch (Throwable cause) {
+    } catch (Throwable cause) {
       Throwables.propagate(cause);
     }
     if (rows.size() > 0) {
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/AbstractCouchDBOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/AbstractCouchDBOutputOperator.java
index 5e6c24b..8080eda 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/AbstractCouchDBOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/AbstractCouchDBOutputOperator.java
@@ -16,10 +16,9 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.couchdb;
+package org.apache.apex.malhar.contrib.couchdb;
 
-import com.datatorrent.lib.db.AbstractStoreOutputOperator;
-import java.util.Map;
+import org.apache.apex.malhar.lib.db.AbstractStoreOutputOperator;
 
 /**
  * Generic base output adaptor which saves tuples in the CouchDb.&nbsp; Subclasses should provide implementation for getting Document Id. <br/>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/AbstractMapBasedInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/AbstractMapBasedInputOperator.java
index 94c814f..e39a91c 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/AbstractMapBasedInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/AbstractMapBasedInputOperator.java
@@ -16,15 +16,15 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.couchdb;
-
-import org.codehaus.jackson.map.ObjectMapper;
-import org.ektorp.ViewResult;
+package org.apache.apex.malhar.contrib.couchdb;
 
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.codehaus.jackson.map.ObjectMapper;
+import org.ektorp.ViewResult;
+
 /**
  * A base CouchDb input adaptor that emits a map. <br/>
  * <p>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/CouchDBPOJOInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/CouchDBPOJOInputOperator.java
index 8111727..6d5defa 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/CouchDBPOJOInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/CouchDBPOJOInputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.couchdb;
+package org.apache.apex.malhar.contrib.couchdb;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -24,14 +24,18 @@
 
 import javax.validation.constraints.NotNull;
 
-import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.codehaus.jackson.JsonNode;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.ektorp.ViewQuery;
 import org.ektorp.ViewResult.Row;
+import org.apache.apex.malhar.lib.util.PojoUtils;
+import org.apache.apex.malhar.lib.util.PojoUtils.Setter;
+import org.apache.apex.malhar.lib.util.PojoUtils.SetterBoolean;
+import org.apache.apex.malhar.lib.util.PojoUtils.SetterDouble;
+import org.apache.apex.malhar.lib.util.PojoUtils.SetterInt;
+import org.apache.apex.malhar.lib.util.PojoUtils.SetterLong;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
 
-import com.datatorrent.lib.util.PojoUtils;
-import com.datatorrent.lib.util.PojoUtils.*;
 import com.datatorrent.api.Context.OperatorContext;
 
 /**
@@ -184,8 +188,7 @@
     try {
       // This code will be replaced after integration of creating POJOs on the fly utility.
       objectClass = Class.forName(outputClass);
-    }
-    catch (ClassNotFoundException ex) {
+    } catch (ClassNotFoundException ex) {
       throw new RuntimeException(ex);
     }
 
@@ -197,18 +200,15 @@
       Class<?> type = null;
       try {
         type = objectClass.getDeclaredField(columns.get(i)).getType();
-      }
-      catch (NoSuchFieldException ex) {
+      } catch (NoSuchFieldException ex) {
         throw new RuntimeException(ex);
-      }
-      catch (SecurityException ex) {
+      } catch (SecurityException ex) {
         throw new RuntimeException(ex);
       }
       fieldType.add(type);
       if (type.isPrimitive()) {
         setterDoc.add(PojoUtils.constructSetter(objectClass, expressions.get(i), type));
-      }
-      else {
+      } else {
         setterDoc.add(PojoUtils.createSetter(objectClass, expressions.get(i), type));
       }
     }
@@ -222,11 +222,9 @@
     Object obj;
     try {
       obj = objectClass.newInstance();
-    }
-    catch (InstantiationException ex) {
+    } catch (InstantiationException ex) {
       throw new RuntimeException(ex);
-    }
-    catch (IllegalAccessException ex) {
+    } catch (IllegalAccessException ex) {
       throw new RuntimeException(ex);
     }
 
@@ -240,21 +238,16 @@
       if (type.isPrimitive()) {
         if (type == int.class) {
           ((SetterInt)setterDoc.get(i)).set(obj, val.get(columns.get(i)).getIntValue());
-        }
-        else if (type == boolean.class) {
+        } else if (type == boolean.class) {
           ((SetterBoolean)setterDoc.get(i)).set(obj, val.get(columns.get(i)).getBooleanValue());
-        }
-        else if (type == long.class) {
+        } else if (type == long.class) {
           ((SetterLong)setterDoc.get(i)).set(obj, val.get(columns.get(i)).getLongValue());
-        }
-        else if (type == double.class) {
+        } else if (type == double.class) {
           ((SetterDouble)setterDoc.get(i)).set(obj, val.get(columns.get(i)).getDoubleValue());
-        }
-        else {
+        } else {
           throw new RuntimeException("Type is not supported");
         }
-      }
-      else {
+      } else {
         ((Setter<Object, Object>)setterDoc.get(i)).set(obj, mapper.readValue(val.get(columns.get(i)), type));
       }
     }
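
Reviewer note: the primitive-setter dispatch reindented above leans on Malhar's `PojoUtils`. A minimal sketch of the `constructSetter()`/`SetterInt` path, assuming a bare field name is accepted as the setter expression (as the operator's `expressions` list suggests):

```java
import org.apache.apex.malhar.lib.util.PojoUtils;
import org.apache.apex.malhar.lib.util.PojoUtils.SetterInt;

public class PojoSetterSketch
{
  public static class Pojo
  {
    private int count;

    public void setCount(int count)
    {
      this.count = count;
    }

    public int getCount()
    {
      return count;
    }
  }

  @SuppressWarnings("rawtypes")
  public static void main(String[] args)
  {
    // constructSetter() returns the primitive-specialized SetterInt for an int field
    SetterInt setter = (SetterInt)PojoUtils.constructSetter(Pojo.class, "count", int.class);
    Pojo pojo = new Pojo();
    setter.set(pojo, 7);
    System.out.println(pojo.getCount()); // 7
  }
}
```
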
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/CouchDBPOJOOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/CouchDBPOJOOutputOperator.java
index f75473c..9d75272 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/CouchDBPOJOOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/CouchDBPOJOOutputOperator.java
@@ -16,15 +16,14 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.couchdb;
+package org.apache.apex.malhar.contrib.couchdb;
 
 import javax.validation.constraints.NotNull;
 
+import org.apache.apex.malhar.lib.util.PojoUtils;
+import org.apache.apex.malhar.lib.util.PojoUtils.Getter;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 
-import com.datatorrent.lib.util.PojoUtils;
-import com.datatorrent.lib.util.PojoUtils.Getter;
-
 /**
  * Implementation of {@link AbstractCouchDBOutputOperator} that saves a POJO in the couch database. <br/>
  * <p>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/CouchDbStore.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/CouchDbStore.java
index 88c2eab..31e5423 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/CouchDbStore.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/CouchDbStore.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.couchdb;
+package org.apache.apex.malhar.contrib.couchdb;
 
 import java.io.IOException;
 import java.net.MalformedURLException;
@@ -32,7 +32,7 @@
 import org.ektorp.http.StdHttpClient;
 import org.ektorp.impl.StdCouchDbInstance;
 
-import com.datatorrent.lib.db.Connectable;
+import org.apache.apex.malhar.lib.db.Connectable;
 
 /**
  * Implements a CouchDb store. <br/>
@@ -141,8 +141,7 @@
   {
     if (docId != null && dbConnector.contains(docId)) {
       dbConnector.update(document);
-    }
-    else {
+    } else {
       //create a document & if docId is null then couch db will generate a random id.
       dbConnector.create(document);
     }
@@ -166,8 +165,7 @@
     if (dbUrl != null) {
       try {
         builder.url(dbUrl);
-      }
-      catch (MalformedURLException e) {
+      } catch (MalformedURLException e) {
         throw new IllegalArgumentException(e.getMessage());
       }
     }
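
Reviewer note: `insertDocument()` above implements update-or-create against ektorp. A standalone sketch of that upsert, using the same client classes this file imports (database URL and name are placeholders):

```java
import java.net.MalformedURLException;
import java.util.HashMap;
import java.util.Map;

import org.ektorp.CouchDbConnector;
import org.ektorp.CouchDbInstance;
import org.ektorp.http.HttpClient;
import org.ektorp.http.StdHttpClient;
import org.ektorp.impl.StdCouchDbInstance;

public class CouchUpsertSketch
{
  public static void main(String[] args) throws MalformedURLException
  {
    HttpClient httpClient = new StdHttpClient.Builder().url("http://localhost:5984").build();
    CouchDbInstance instance = new StdCouchDbInstance(httpClient);
    CouchDbConnector db = instance.createConnector("test_db", true);

    Map<String, Object> document = new HashMap<>();
    document.put("_id", "doc-1");
    String docId = (String)document.get("_id");
    if (docId != null && db.contains(docId)) {
      db.update(document);   // existing document: update in place
    } else {
      db.create(document);   // null id lets CouchDB generate one
    }
  }
}
```
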
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/MapBasedCouchDbOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/MapBasedCouchDbOutputOperator.java
index f83f93c..6c8b813 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/MapBasedCouchDbOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/MapBasedCouchDbOutputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.couchdb;
+package org.apache.apex.malhar.contrib.couchdb;
 
 import java.util.Map;
 
@@ -34,7 +34,7 @@
   @Override
   public String getDocumentId(Map<Object, Object> tuple)
   {
-    return (String) tuple.get("_id");
+    return (String)tuple.get("_id");
   }
 
 }
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/package-info.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/package-info.java
index be6ec34..13b7e17 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/package-info.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/couchdb/package-info.java
@@ -17,4 +17,4 @@
  * under the License.
  */
 @org.apache.hadoop.classification.InterfaceStability.Evolving
-package com.datatorrent.contrib.couchdb;
+package org.apache.apex.malhar.contrib.couchdb;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/AbstractElasticSearchInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/AbstractElasticSearchInputOperator.java
index 022530b..5d9e915 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/AbstractElasticSearchInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/AbstractElasticSearchInputOperator.java
@@ -16,14 +16,15 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.elasticsearch;
+package org.apache.apex.malhar.contrib.elasticsearch;
 
 import org.elasticsearch.action.search.SearchRequestBuilder;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.search.SearchHit;
 
+import org.apache.apex.malhar.lib.db.AbstractStoreInputOperator;
+
 import com.datatorrent.api.Context.OperatorContext;
-import com.datatorrent.lib.db.AbstractStoreInputOperator;
 
 /**
  * This is the base implementation for a non transactional input operator for ElasticSearch
@@ -53,7 +54,7 @@
   /**
    * Initializing transient fields such as ElasticSearchConnectable, SearchRequestBuilder
    *
-   * @see com.datatorrent.lib.db.AbstractStoreInputOperator#setup(com.datatorrent.api.Context.OperatorContext)
+   * @see org.apache.apex.malhar.lib.db.AbstractStoreInputOperator#setup(com.datatorrent.api.Context.OperatorContext)
    */
   @Override
   public void setup(OperatorContext t1)
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/AbstractElasticSearchOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/AbstractElasticSearchOutputOperator.java
index 0282ae8..34b4440 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/AbstractElasticSearchOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/AbstractElasticSearchOutputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.elasticsearch;
+package org.apache.apex.malhar.contrib.elasticsearch;
 
 import java.util.Queue;
 import java.util.concurrent.ArrayBlockingQueue;
@@ -27,9 +27,10 @@
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 
+import org.apache.apex.malhar.lib.db.AbstractStoreOutputOperator;
+
 import com.datatorrent.api.Context.OperatorContext;
 import com.datatorrent.netlet.util.DTThrowable;
-import com.datatorrent.lib.db.AbstractStoreOutputOperator;
 
 /**
  * This is the base implementation for a non-transactional batch output operator for ElasticSearch.
@@ -70,7 +71,7 @@
   /**
    * Initialize transient fields such as {@code tupleBatch}
    *
-   * @see com.datatorrent.lib.db.AbstractStoreOutputOperator#setup(com.datatorrent.api.Context.OperatorContext)
+   * @see org.apache.apex.malhar.lib.db.AbstractStoreOutputOperator#setup(com.datatorrent.api.Context.OperatorContext)
    */
   @Override
   public void setup(OperatorContext context)
@@ -83,7 +84,7 @@
    * Adds tuple to the queue.
    * Calls {@link #processBatch()} if queue is full
    *
-   * @see com.datatorrent.lib.db.AbstractStoreOutputOperator#processTuple(java.lang.Object)
+   * @see org.apache.apex.malhar.lib.db.AbstractStoreOutputOperator#processTuple(java.lang.Object)
    */
   public void processTuple(T tuple)
   {
@@ -128,7 +129,8 @@
    * @param tuple
    * @return
    */
-  protected IndexRequestBuilder getIndexRequestBuilder(T tuple){
+  protected IndexRequestBuilder getIndexRequestBuilder(T tuple)
+  {
     IndexRequestBuilder indexRequestBuilder = new IndexRequestBuilder(store.client, getIndexName(tuple));
     String id = getId(tuple);
     if (id != null) {
@@ -176,22 +178,22 @@
    * @param tuple
    * @return
    */
-    protected abstract String getType(T tuple);
+  protected abstract String getType(T tuple);
 
-    /**
-     * @return the batchSize
-     */
-    public int getBatchSize()
-    {
-      return batchSize;
-    }
+  /**
+   * @return the batchSize
+   */
+  public int getBatchSize()
+  {
+    return batchSize;
+  }
 
-    /**
-     * @param batchSize the batchSize to set
-     */
-    public void setBatchSize(int batchSize)
-    {
-      this.batchSize = batchSize;
-    }
+  /**
+   * @param batchSize the batchSize to set
+   */
+  public void setBatchSize(int batchSize)
+  {
+    this.batchSize = batchSize;
+  }
 
 }
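
Reviewer note: with the indentation fixed it is easier to see the batching contract of this operator: tuples accumulate in a bounded queue and `processBatch()` flushes once `batchSize` is reached. A self-contained sketch of that pattern (the real operator drains the queue into a bulk request instead of printing):

```java
import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;

public class BatchQueueSketch
{
  private final int batchSize = 3;
  private final Queue<String> tupleBatch = new ArrayBlockingQueue<>(batchSize);

  public void processTuple(String tuple)
  {
    tupleBatch.add(tuple);
    if (tupleBatch.size() >= batchSize) {
      processBatch();
    }
  }

  private void processBatch()
  {
    // stand-in for building and executing the bulk index request
    System.out.println("flushing " + tupleBatch.size() + " tuples");
    tupleBatch.clear();
  }

  public static void main(String[] args)
  {
    BatchQueueSketch sketch = new BatchQueueSketch();
    for (int i = 0; i < 7; i++) {
      sketch.processTuple("t" + i);
    }
  }
}
```
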
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/ElasticSearchConnectable.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/ElasticSearchConnectable.java
index 34eca95..c607393 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/ElasticSearchConnectable.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/ElasticSearchConnectable.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.elasticsearch;
+package org.apache.apex.malhar.contrib.elasticsearch;
 
 import java.io.IOException;
 
@@ -25,7 +25,7 @@
 import org.elasticsearch.client.transport.TransportClient;
 import org.elasticsearch.common.transport.InetSocketTransportAddress;
 
-import com.datatorrent.lib.db.Connectable;
+import org.apache.apex.malhar.lib.db.Connectable;
 
 /**
  * Elastic search base connector which has basic information for an operator <br>
@@ -91,7 +91,7 @@
   /*
    * (non-Javadoc)
    *
-   * @see com.datatorrent.lib.db.Connectable#connect()
+   * @see org.apache.apex.malhar.lib.db.Connectable#connect()
    */
   @Override
   public void connect() throws IOException
@@ -103,7 +103,7 @@
   /*
    * (non-Javadoc)
    *
-   * @see com.datatorrent.lib.db.Connectable#disconnect()
+   * @see org.apache.apex.malhar.lib.db.Connectable#disconnect()
    */
   @Override
   public void disconnect() throws IOException
@@ -116,7 +116,7 @@
   /*
    * (non-Javadoc)
    *
-   * @see com.datatorrent.lib.db.Connectable#isConnected()
+   * @see org.apache.apex.malhar.lib.db.Connectable#isConnected()
    */
   @Override
   public boolean isConnected()
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/ElasticSearchMapInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/ElasticSearchMapInputOperator.java
index dcbee9d..6774f10 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/ElasticSearchMapInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/ElasticSearchMapInputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.elasticsearch;
+package org.apache.apex.malhar.contrib.elasticsearch;
 
 import java.util.Map;
 
@@ -50,7 +50,7 @@
   /**
    * {@link SearchRequestBuilder} properties which do not change for each window are set during operator initialization.
    *
-   * @see com.datatorrent.contrib.elasticsearch.AbstractElasticSearchInputOperator#setup(com.datatorrent.api.Context.OperatorContext)
+   * @see org.apache.apex.malhar.contrib.elasticsearch.AbstractElasticSearchInputOperator#setup(com.datatorrent.api.Context.OperatorContext)
    */
   @Override
   public void setup(OperatorContext t1)
@@ -63,14 +63,14 @@
    * (non-Javadoc)
    *
    * @see
-   * com.datatorrent.contrib.elasticsearch.AbstractElasticSearchInputOperator#convertToTuple(org.elasticsearch.search
+   * org.apache.apex.malhar.contrib.elasticsearch.AbstractElasticSearchInputOperator#convertToTuple(org.elasticsearch.search
    * .SearchHit)
    */
   @Override
   protected T convertToTuple(SearchHit hit)
   {
     Map<String, Object> tuple = hit.getSource();
-    return (T) tuple;
+    return (T)tuple;
   }
 
   /**
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/ElasticSearchMapOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/ElasticSearchMapOutputOperator.java
index 8616938..d85f0a3 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/ElasticSearchMapOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/ElasticSearchMapOutputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.elasticsearch;
+package org.apache.apex.malhar.contrib.elasticsearch;
 
 import java.util.Map;
 
@@ -50,7 +50,7 @@
    * (non-Javadoc)
    *
    * @see
-   * com.datatorrent.contrib.elasticsearch.AbstractElasticSearchOutputOperator#setSource(org.elasticsearch.action.index
+   * org.apache.apex.malhar.contrib.elasticsearch.AbstractElasticSearchOutputOperator#setSource(org.elasticsearch.action.index
    * .IndexRequestBuilder, java.lang.Object)
    */
   @Override
@@ -62,7 +62,7 @@
   /*
    * (non-Javadoc)
    *
-   * @see com.datatorrent.contrib.elasticsearch.AbstractElasticSearchOutputOperator#getId(java.lang.Object)
+   * @see org.apache.apex.malhar.contrib.elasticsearch.AbstractElasticSearchOutputOperator#getId(java.lang.Object)
    */
   @Override
   protected String getId(T tuple)
@@ -104,7 +104,7 @@
   /*
    * (non-Javadoc)
    *
-   * @see com.datatorrent.contrib.elasticsearch.AbstractElasticSearchOutputOperator#getIndexName(java.lang.Object)
+   * @see org.apache.apex.malhar.contrib.elasticsearch.AbstractElasticSearchOutputOperator#getIndexName(java.lang.Object)
    */
   @Override
   protected String getIndexName(T tuple)
@@ -122,7 +122,7 @@
   }
 
   /* (non-Javadoc)
-   * @see com.datatorrent.contrib.elasticsearch.AbstractElasticSearchOutputOperator#getType(java.lang.Object)
+   * @see org.apache.apex.malhar.contrib.elasticsearch.AbstractElasticSearchOutputOperator#getType(java.lang.Object)
    */
   @Override
   protected String getType(T tuple)
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/ElasticSearchPercolatorOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/ElasticSearchPercolatorOperator.java
index 9f7b66c..614fd59 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/ElasticSearchPercolatorOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/ElasticSearchPercolatorOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.elasticsearch;
+package org.apache.apex.malhar.contrib.elasticsearch;
 
 import java.io.IOException;
 
@@ -24,9 +24,9 @@
 
 import org.elasticsearch.action.percolate.PercolateResponse;
 
-import com.datatorrent.common.util.BaseOperator;
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
+import com.datatorrent.common.util.BaseOperator;
 import com.datatorrent.netlet.util.DTThrowable;
 
 /**
@@ -50,8 +50,8 @@
   protected transient ElasticSearchPercolatorStore store;
   public final transient DefaultOutputPort<PercolateResponse> outputPort = new DefaultOutputPort<PercolateResponse>();
 
-  public final transient DefaultInputPort<Object> inputPort = new DefaultInputPort<Object>() {
-
+  public final transient DefaultInputPort<Object> inputPort = new DefaultInputPort<Object>()
+  {
     /*
      * (non-Javadoc)
      *
@@ -61,7 +61,7 @@
     public void process(Object tuple)
     {
 
-      PercolateResponse response = store.percolate(new String[] { indexName }, documentType, tuple);
+      PercolateResponse response = store.percolate(new String[] {indexName}, documentType, tuple);
       outputPort.emit(response);
     }
   };
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/ElasticSearchPercolatorStore.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/ElasticSearchPercolatorStore.java
index c13c025..19639c7 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/ElasticSearchPercolatorStore.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/ElasticSearchPercolatorStore.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.elasticsearch;
+package org.apache.apex.malhar.contrib.elasticsearch;
 
 import java.io.IOException;
 
@@ -58,7 +58,8 @@
     }
   }
 
-  public PercolateResponse percolate(String[] indexNames, String documentType, Object tuple){
+  public PercolateResponse percolate(String[] indexNames, String documentType, Object tuple)
+  {
     XContentBuilder docBuilder;
     try {
 
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/package-info.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/package-info.java
index d89d53f..1c74c32 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/package-info.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/elasticsearch/package-info.java
@@ -17,4 +17,4 @@
  * under the License.
  */
 @org.apache.hadoop.classification.InterfaceStability.Evolving
-package com.datatorrent.contrib.elasticsearch;
+package org.apache.apex.malhar.contrib.elasticsearch;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/AbstractEnricher.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/AbstractEnricher.java
index c377b96..b7e8820 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/AbstractEnricher.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/AbstractEnricher.java
@@ -16,12 +16,17 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.enrich;
+package org.apache.apex.malhar.contrib.enrich;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.apex.malhar.lib.db.cache.CacheManager;
+import org.apache.apex.malhar.lib.db.cache.CacheStore;
+import org.apache.apex.malhar.lib.db.cache.CacheStore.ExpiryType;
+import org.apache.apex.malhar.lib.util.FieldInfo;
+import org.apache.apex.malhar.lib.util.FieldInfo.SupportType;
 import org.apache.hadoop.classification.InterfaceStability;
 
 import com.esotericsoftware.kryo.NotNull;
@@ -29,11 +34,6 @@
 import com.datatorrent.api.Context;
 import com.datatorrent.api.Operator;
 import com.datatorrent.common.util.BaseOperator;
-import com.datatorrent.lib.db.cache.CacheManager;
-import com.datatorrent.lib.db.cache.CacheStore;
-import com.datatorrent.lib.db.cache.CacheStore.ExpiryType;
-import com.datatorrent.lib.util.FieldInfo;
-import com.datatorrent.lib.util.FieldInfo.SupportType;
 
 /**
  * Base class for Enrichment Operator.&nbsp; Subclasses should provide implementation to getKey and convert.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/BackendLoader.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/BackendLoader.java
index 845a756..62d11a0 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/BackendLoader.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/BackendLoader.java
@@ -16,15 +16,14 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.enrich;
+package org.apache.apex.malhar.contrib.enrich;
 
 import java.util.List;
 
+import org.apache.apex.malhar.lib.db.cache.CacheManager;
+import org.apache.apex.malhar.lib.util.FieldInfo;
 import org.apache.hadoop.classification.InterfaceStability;
 
-import com.datatorrent.lib.db.cache.CacheManager;
-import com.datatorrent.lib.util.FieldInfo;
-
 /**
  * Interface for store to be used in enrichment
  *
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/DelimitedFSLoader.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/DelimitedFSLoader.java
index 3121cf1..aa4a8bd 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/DelimitedFSLoader.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/DelimitedFSLoader.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.enrich;
+package org.apache.apex.malhar.contrib.enrich;
 
 import java.io.IOException;
 import java.util.List;
@@ -30,14 +30,13 @@
 import org.supercsv.io.CsvMapReader;
 import org.supercsv.prefs.CsvPreference;
 
+import org.apache.apex.malhar.contrib.parser.CellProcessorBuilder;
+import org.apache.apex.malhar.contrib.parser.DelimitedSchema;
+import org.apache.apex.malhar.contrib.parser.DelimitedSchema.Field;
+import org.apache.apex.malhar.lib.util.ReusableStringReader;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceStability;
 
-import com.datatorrent.contrib.parser.CellProcessorBuilder;
-import com.datatorrent.contrib.parser.DelimitedSchema;
-import com.datatorrent.contrib.parser.DelimitedSchema.Field;
-import com.datatorrent.lib.util.ReusableStringReader;
-
 /**
  * This implementation of {@link FSLoader} is used to load data from delimited
  * file.User needs to provide a schema as a string specified in a json format as
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/FSLoader.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/FSLoader.java
index e04d6c4..cc23cb2 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/FSLoader.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/FSLoader.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.enrich;
+package org.apache.apex.malhar.contrib.enrich;
 
 import java.io.BufferedReader;
 import java.io.IOException;
@@ -28,6 +28,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.lib.db.cache.CacheManager;
+import org.apache.apex.malhar.lib.util.FieldInfo;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -38,8 +40,6 @@
 import com.google.common.collect.Maps;
 
 import com.datatorrent.api.Component;
-import com.datatorrent.lib.db.cache.CacheManager;
-import com.datatorrent.lib.util.FieldInfo;
 
 /**
  * This implementation of {@link BackendLoader} loads the data from a given file
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/FixedWidthFSLoader.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/FixedWidthFSLoader.java
index 2f54f66..e855c07 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/FixedWidthFSLoader.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/FixedWidthFSLoader.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.enrich;
+package org.apache.apex.malhar.contrib.enrich;
 
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
@@ -30,6 +30,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.contrib.parser.AbstractCsvParser.FIELD_TYPE;
+import org.apache.apex.malhar.contrib.parser.AbstractCsvParser.Field;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -39,9 +41,6 @@
 import com.univocity.parsers.fixed.FixedWidthParser;
 import com.univocity.parsers.fixed.FixedWidthParserSettings;
 
-import com.datatorrent.contrib.parser.AbstractCsvParser.FIELD_TYPE;
-import com.datatorrent.contrib.parser.AbstractCsvParser.Field;
-
 /**
  * This implementation of {@link FSLoader} is used to load data from fixed width
  * file.User needs to set {@link FixedWidthFSLoader#fieldDescription} to specify
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/JDBCLoader.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/JDBCLoader.java
index ce02c08..0ed9e22 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/JDBCLoader.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/JDBCLoader.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.enrich;
+package org.apache.apex.malhar.contrib.enrich;
 
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
@@ -26,12 +26,12 @@
 import java.util.List;
 import java.util.Map;
 
+import org.apache.apex.malhar.lib.db.jdbc.JdbcStore;
+import org.apache.apex.malhar.lib.util.FieldInfo;
 import org.apache.commons.collections.CollectionUtils;
 import org.apache.hadoop.classification.InterfaceStability;
 
 import com.google.common.collect.Lists;
-import com.datatorrent.lib.db.jdbc.JdbcStore;
-import com.datatorrent.lib.util.FieldInfo;
 
 /**
  * <p>HBaseLoader extends from {@link JdbcStore} uses JDBC to connect and implements BackendLoaders interface.</p> <br/>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/JsonFSLoader.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/JsonFSLoader.java
index 183d07a..1347100 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/JsonFSLoader.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/JsonFSLoader.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.enrich;
+package org.apache.apex.malhar.contrib.enrich;
 
 import java.io.IOException;
 import java.util.Map;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/MapEnricher.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/MapEnricher.java
index 6e7b5f7..d3d8df4 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/MapEnricher.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/MapEnricher.java
@@ -16,21 +16,21 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.enrich;
+package org.apache.apex.malhar.contrib.enrich;
 
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.apex.malhar.lib.util.FieldInfo;
 import org.apache.hadoop.classification.InterfaceStability;
 
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
-import com.datatorrent.lib.util.FieldInfo;
 
 /**
  * This class takes a HashMap tuple as input and extracts value of the lookupKey configured
- * for this operator. It perform a lookup using {@link com.datatorrent.lib.db.cache.CacheManager} to
+ * for this operator. It perform a lookup using {@link org.apache.apex.malhar.lib.db.cache.CacheManager} to
  * find a matching entry and adds the result to the original tuple.
  *
  * <p>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/NullValuesCacheManager.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/NullValuesCacheManager.java
index 6d42fec..74f3f97 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/NullValuesCacheManager.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/NullValuesCacheManager.java
@@ -16,12 +16,11 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.enrich;
+package org.apache.apex.malhar.contrib.enrich;
 
+import org.apache.apex.malhar.lib.db.cache.CacheManager;
 import org.apache.hadoop.classification.InterfaceStability;
 
-import com.datatorrent.lib.db.cache.CacheManager;
-
 /**
  * Null Values Cache Manager. Using this NULL entries can be specified explicitly.
  *
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/POJOEnricher.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/POJOEnricher.java
index f22acdb..1f979ee 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/POJOEnricher.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/POJOEnricher.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.enrich;
+package org.apache.apex.malhar.contrib.enrich;
 
 import java.lang.reflect.Field;
 import java.util.ArrayList;
@@ -26,6 +26,9 @@
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
+import org.apache.apex.malhar.lib.util.FieldInfo;
+import org.apache.apex.malhar.lib.util.PojoUtils;
 import org.apache.commons.lang3.ClassUtils;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -35,13 +38,11 @@
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.InputPortFieldAnnotation;
 import com.datatorrent.api.annotation.OutputPortFieldAnnotation;
-import com.datatorrent.lib.util.FieldInfo;
-import com.datatorrent.lib.util.PojoUtils;
 
 
 /**
  * This class takes a POJO as input and extracts the value of the lookupKey configured
- * for this operator. It perform a lookup using {@link com.datatorrent.lib.db.cache.CacheManager} to
+ * for this operator. It performs a lookup using {@link org.apache.apex.malhar.lib.db.cache.CacheManager} to
  * find a matching entry and adds the result to the original tuple.
  *
  * <p>
@@ -212,7 +213,7 @@
 
   @SuppressWarnings({ "unchecked", "rawtypes" })
   private PojoUtils.Setter generateSettersForField(Class<?> klass, String outputFieldName)
-      throws NoSuchFieldException, SecurityException
+    throws NoSuchFieldException, SecurityException
   {
     Field f = klass.getDeclaredField(outputFieldName);
     Class c = ClassUtils.primitiveToWrapper(f.getType());
@@ -221,7 +222,7 @@
 
   @SuppressWarnings({ "unchecked", "rawtypes" })
   private PojoUtils.Getter generateGettersForField(Class<?> klass, String inputFieldName)
-      throws NoSuchFieldException, SecurityException
+    throws NoSuchFieldException, SecurityException
   {
     Field f = klass.getDeclaredField(inputFieldName);
     Class c = ClassUtils.primitiveToWrapper(f.getType());
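The two generate* methods above share one pattern: resolve the declared field, widen a primitive to its wrapper, and hand both to a PojoUtils factory. A compact sketch of the setter half, assuming the PojoUtils.createSetter(Class, String, Class) factory the operator already imports (the class name here is made up):

    import java.lang.reflect.Field;

    import org.apache.apex.malhar.lib.util.PojoUtils;
    import org.apache.commons.lang3.ClassUtils;

    public class SetterSketch
    {
      // Mirrors generateSettersForField: primitives are resolved through
      // their wrapper type before the setter is generated.
      @SuppressWarnings({"unchecked", "rawtypes"})
      public static PojoUtils.Setter setterFor(Class<?> klass, String fieldName) throws NoSuchFieldException
      {
        Field f = klass.getDeclaredField(fieldName);
        Class c = ClassUtils.primitiveToWrapper(f.getType());
        return PojoUtils.createSetter(klass, fieldName, c);
      }
    }
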
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/ReadOnlyBackup.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/ReadOnlyBackup.java
index c6afbe1..0ae3ee7 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/ReadOnlyBackup.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/enrich/ReadOnlyBackup.java
@@ -16,15 +16,14 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.enrich;
+package org.apache.apex.malhar.contrib.enrich;
 
 import java.util.List;
 import java.util.Map;
 
+import org.apache.apex.malhar.lib.util.FieldInfo;
 import org.apache.hadoop.classification.InterfaceStability;
 
-import com.datatorrent.lib.util.FieldInfo;
-
 /**
  * ReadOnly abstract implementation of BackendLoader.
  *
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/formatter/CsvFormatter.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/formatter/CsvFormatter.java
index 2bd0e67..3469039 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/formatter/CsvFormatter.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/formatter/CsvFormatter.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.formatter;
+package org.apache.apex.malhar.contrib.formatter;
 
 import java.io.IOException;
 import java.io.StringWriter;
@@ -34,14 +34,15 @@
 import org.supercsv.io.ICsvBeanWriter;
 import org.supercsv.prefs.CsvPreference;
 
+import org.apache.apex.malhar.contrib.parser.DelimitedSchema;
+import org.apache.apex.malhar.contrib.parser.DelimitedSchema.Field;
+import org.apache.apex.malhar.contrib.parser.Schema.FieldType;
+import org.apache.apex.malhar.lib.formatter.Formatter;
+
 import com.google.common.annotations.VisibleForTesting;
 
 import com.datatorrent.api.AutoMetric;
 import com.datatorrent.api.Context;
-import com.datatorrent.contrib.parser.DelimitedSchema;
-import com.datatorrent.contrib.parser.DelimitedSchema.Field;
-import com.datatorrent.contrib.parser.Schema.FieldType;
-import com.datatorrent.lib.formatter.Formatter;
 import com.datatorrent.netlet.util.DTThrowable;
 
 /**
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/AbstractGeodeInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/AbstractGeodeInputOperator.java
index 497e6e4..d56d471 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/AbstractGeodeInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/AbstractGeodeInputOperator.java
@@ -16,9 +16,9 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.geode;
+package org.apache.apex.malhar.contrib.geode;
 
-import com.datatorrent.lib.db.AbstractKeyValueStoreInputOperator;
+import org.apache.apex.malhar.lib.db.AbstractKeyValueStoreInputOperator;
 
 /**
  * This is the base implementation used for geode input adapters.&nbsp; A
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/AbstractGeodeOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/AbstractGeodeOutputOperator.java
index dd0bad2..d3951fc 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/AbstractGeodeOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/AbstractGeodeOutputOperator.java
@@ -16,9 +16,9 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.geode;
+package org.apache.apex.malhar.contrib.geode;
 
-import com.datatorrent.lib.db.AbstractStoreOutputOperator;
+import org.apache.apex.malhar.lib.db.AbstractStoreOutputOperator;
 
 /**
  * This is the base implementation of geode output operators.&nbsp; A concrete
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/GeodeCheckpointStore.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/GeodeCheckpointStore.java
index 2152b97..0c01373 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/GeodeCheckpointStore.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/GeodeCheckpointStore.java
@@ -16,9 +16,21 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.geode;
+package org.apache.apex.malhar.contrib.geode;
 
-import com.datatorrent.lib.util.StorageAgentKeyValueStore;
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.apex.malhar.lib.util.StorageAgentKeyValueStore;
+
 import com.esotericsoftware.kryo.Kryo;
 import com.esotericsoftware.kryo.io.Input;
 import com.esotericsoftware.kryo.io.Output;
@@ -32,17 +44,6 @@
 import com.gemstone.gemfire.cache.query.QueryService;
 import com.gemstone.gemfire.cache.query.SelectResults;
 import com.google.common.collect.Maps;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
 
 /**
 * Geode Store implementation of {@link StorageAgentKeyValueStore}. Uses {@link Kryo}
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/GeodeKeyValueStorageAgent.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/GeodeKeyValueStorageAgent.java
index 691c2c1..819d531 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/GeodeKeyValueStorageAgent.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/GeodeKeyValueStorageAgent.java
@@ -16,14 +16,13 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.geode;
+package org.apache.apex.malhar.contrib.geode;
 
 import java.io.Serializable;
 
+import org.apache.apex.malhar.lib.util.AbstractKeyValueStorageAgent;
 import org.apache.hadoop.conf.Configuration;
 
-import com.datatorrent.lib.util.AbstractKeyValueStorageAgent;
-
 /**
  * Storage Agent implementation which uses {@link GeodeCheckpointStore} for operator
  * checkpointing
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/GeodePOJOOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/GeodePOJOOutputOperator.java
index c7d22c7..d2001e5 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/GeodePOJOOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/GeodePOJOOutputOperator.java
@@ -16,15 +16,14 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.geode;
+package org.apache.apex.malhar.contrib.geode;
 
+import org.apache.apex.malhar.lib.util.FieldInfo;
+import org.apache.apex.malhar.lib.util.PojoUtils;
+import org.apache.apex.malhar.lib.util.PojoUtils.Getter;
+import org.apache.apex.malhar.lib.util.TableInfo;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 
-import com.datatorrent.lib.util.FieldInfo;
-import com.datatorrent.lib.util.PojoUtils;
-import com.datatorrent.lib.util.PojoUtils.Getter;
-import com.datatorrent.lib.util.TableInfo;
-
 /**
  *
  * @displayName Geode Output Operator
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/GeodeStore.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/GeodeStore.java
index d345661..803a83e 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/GeodeStore.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/GeodeStore.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.geode;
+package org.apache.apex.malhar.contrib.geode;
 
 import java.io.IOException;
 import java.io.Serializable;
@@ -27,7 +27,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.accumulo.core.client.impl.thrift.ThriftTest.Processor.throwsError;
+import org.apache.apex.malhar.lib.db.KeyValueStore;
 
 import com.gemstone.gemfire.cache.CacheClosedException;
 import com.gemstone.gemfire.cache.CacheWriterException;
@@ -43,8 +43,6 @@
 import com.gemstone.gemfire.cache.query.SelectResults;
 import com.gemstone.gemfire.cache.query.TypeMismatchException;
 
-import com.datatorrent.lib.db.KeyValueStore;
-
 /**
  * Provides the implementation of a Geode store.
  * Geode is a distributed in-memory database
@@ -145,7 +143,7 @@
     if (region == null) {
       region = clientCache.getRegion(regionName);
       if (region == null) {
-        region = clientCache.<Object, Object> createClientRegionFactory(ClientRegionShortcut.PROXY).create(regionName);
+        region = clientCache.<Object, Object>createClientRegionFactory(ClientRegionShortcut.PROXY).create(regionName);
       }
     }
 
@@ -164,7 +162,7 @@
     region = clientCache.getRegion(getRegionName());
 
     if (region == null) {
-      region = clientCache.<Object, Object> createClientRegionFactory(ClientRegionShortcut.PROXY).create(
+      region = clientCache.<Object, Object>createClientRegionFactory(ClientRegionShortcut.PROXY).create(
           getRegionName());
     }
 
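Both hunks above adjust generics spacing around the same get-or-create idiom; pulled out on its own (with the region name as a parameter) it reads:

    import com.gemstone.gemfire.cache.Region;
    import com.gemstone.gemfire.cache.client.ClientCache;
    import com.gemstone.gemfire.cache.client.ClientRegionShortcut;

    public class RegionLookupSketch
    {
      // Look the region up first; only create a client-side PROXY region
      // when it does not exist yet.
      public static Region<Object, Object> regionFor(ClientCache clientCache, String regionName)
      {
        Region<Object, Object> region = clientCache.getRegion(regionName);
        if (region == null) {
          region = clientCache.<Object, Object>createClientRegionFactory(ClientRegionShortcut.PROXY).create(regionName);
        }
        return region;
      }
    }
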
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/RegionCreateFunction.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/RegionCreateFunction.java
index 9e948c4..d7aa9d5 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/RegionCreateFunction.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/geode/RegionCreateFunction.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.geode;
+package org.apache.apex.malhar.contrib.geode;
 
 import java.util.ArrayList;
 import java.util.List;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBaseAppendOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBaseAppendOutputOperator.java
index a2c1297..d83768d 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBaseAppendOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBaseAppendOutputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.hbase;
+package org.apache.apex.malhar.contrib.hbase;
 
 import java.io.IOException;
 
@@ -29,7 +29,7 @@
 import com.datatorrent.netlet.util.DTThrowable;
 
 /**
- * A base implementation of a StoreOutputOperator operator that stores tuples in HBase columns and offers 
+ * A base implementation of a StoreOutputOperator operator that stores tuples in HBase columns and offers
  * non-transactional append.&nbsp; Subclasses should provide implementation for appending operations. <br>
  * <p>
  * <br>
@@ -49,16 +49,19 @@
  *            The tuple type
  * @since 1.0.2
  */
-public abstract class AbstractHBaseAppendOutputOperator<T> extends AbstractHBaseOutputOperator<T> {
+public abstract class AbstractHBaseAppendOutputOperator<T> extends AbstractHBaseOutputOperator<T>
+{
   private static final transient Logger logger = LoggerFactory
       .getLogger(AbstractHBaseAppendOutputOperator.class);
 
-  public AbstractHBaseAppendOutputOperator() {
+  public AbstractHBaseAppendOutputOperator()
+  {
     store = new HBaseStore();
   }
 
   @Override
-  public void processTuple(T tuple, HTable table) {
+  public void processTuple(T tuple, HTable table)
+  {
     Append append = operationAppend(tuple);
     try {
       table.append(append);
diff --git a/contrib/src/main/java/com/datatorrent/contrib/hbase/AbstractHBaseOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBaseOutputOperator.java
similarity index 93%
rename from contrib/src/main/java/com/datatorrent/contrib/hbase/AbstractHBaseOutputOperator.java
rename to contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBaseOutputOperator.java
index b9d3c49..f07f123 100644
--- a/contrib/src/main/java/com/datatorrent/contrib/hbase/AbstractHBaseOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBaseOutputOperator.java
@@ -16,15 +16,15 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.hbase;
+package org.apache.apex.malhar.contrib.hbase;
 
+import org.apache.apex.malhar.lib.db.AbstractStoreOutputOperator;
 import org.apache.hadoop.hbase.client.HTable;
 
 import com.datatorrent.api.Operator;
-import com.datatorrent.lib.db.AbstractStoreOutputOperator;
 
 /**
- * A base implementation of a HBase output operator that stores tuples in HBase and offers 
+ * A base implementation of a HBase output operator that stores tuples in HBase and offers
  * non-transactional Insert.&nbsp; Subclasses should provide implementation for specific Hbase operations.
  *
  * @since 3.8.0
@@ -32,7 +32,7 @@
 public abstract class AbstractHBaseOutputOperator<T> extends AbstractStoreOutputOperator<T, HBaseStore> implements OutputAdapter.Driver<T>, Operator.CheckpointNotificationListener
 {
   private transient OutputAdapter<T> outputAdapter;
-  
+
   public AbstractHBaseOutputOperator()
   {
     outputAdapter = new OutputAdapter<T>(store, this);
@@ -43,13 +43,13 @@
   {
     outputAdapter.processTuple(tuple);
   }
-  
+
   @Override
   public abstract void processTuple(T tuple, HTable table);
 
   /**
    * Get the table name for the tuple.
-   * 
+   *
    * Implementations can override this method to return the table name where the tuple should be written.
    * Return null to write to the default table.
    * @param tuple The tuple
@@ -84,5 +84,5 @@
   {
 
   }
-  
+
 }
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBasePutOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBasePutOutputOperator.java
index 2421936..cc5f8e4 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBasePutOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBasePutOutputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.hbase;
+package org.apache.apex.malhar.contrib.hbase;
 
 import java.io.InterruptedIOException;
 
@@ -48,7 +48,8 @@
  *            The tuple type
  * @since 1.0.2
  */
-public abstract class AbstractHBasePutOutputOperator<T> extends AbstractHBaseOutputOperator<T> {
+public abstract class AbstractHBasePutOutputOperator<T> extends AbstractHBaseOutputOperator<T>
+{
   private static final transient Logger logger = LoggerFactory.getLogger(AbstractHBasePutOutputOperator.class);
 
   public AbstractHBasePutOutputOperator()
@@ -56,7 +57,6 @@
     store = new HBaseStore();
   }
 
-  @Override
   public void processTuple(T tuple, HTable table)
   {
     Put put = operationPut(tuple);
@@ -72,5 +72,5 @@
   }
 
   public abstract Put operationPut(T t);
-  
+
 }
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBaseWindowAppendOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBaseWindowAppendOutputOperator.java
index 2fa2146..1747cde 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBaseWindowAppendOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBaseWindowAppendOutputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.hbase;
+package org.apache.apex.malhar.contrib.hbase;
 
 import java.io.IOException;
 
@@ -30,7 +30,9 @@
 import com.datatorrent.netlet.util.DTThrowable;
 
 /**
- * A base implementation of an AggregateTransactionableStoreOutputOperator operator that stores tuples in HBase columns and provides batch append.&nbsp; Subclasses should provide implementation for appending operations. <br>
+ * A base implementation of an AggregateTransactionableStoreOutputOperator
+ * operator that stores tuples in HBase columns and provides batch append.&nbsp;
+ * Subclasses should provide an implementation for the append operation. <br>
  * <p>
  * <br>
  * This class provides a HBase output operator that can be used to store tuples
@@ -45,17 +47,17 @@
  * guarantee each tuple is written only once to HBase in case the operator is
  * restarted from an earlier checkpoint. It only tries to minimize the number of
  * duplicates limiting it to the tuples that were processed in the window when
- * the operator shutdown.
- * It supports atleast once and atmost once processing modes.
- * Exactly once is not supported
+ * the operator shut down. It supports at-least-once and at-most-once processing
+ * modes. Exactly-once is not supported.
+ *
  * @displayName Abstract HBase Window Append Output
  * @category Output
  * @tags hbase, append, transactionable, batch
- * @param <T>
- *            The tuple type
+ * @param <T> The tuple type
  * @since 1.0.2
  */
-public abstract class AbstractHBaseWindowAppendOutputOperator<T> extends AbstractHBaseWindowOutputOperator<T> {
+public abstract class AbstractHBaseWindowAppendOutputOperator<T> extends AbstractHBaseWindowOutputOperator<T>
+{
   private static final transient Logger logger = LoggerFactory.getLogger(AbstractHBaseWindowAppendOutputOperator.class);
   private transient ProcessingMode mode;
 
@@ -74,12 +76,14 @@
     this.mode = mode;
   }
 
-  public AbstractHBaseWindowAppendOutputOperator() {
+  public AbstractHBaseWindowAppendOutputOperator()
+  {
     store = new HBaseWindowStore();
   }
 
   @Override
-  public void processTuple(T tuple, HTable table) {
+  public void processTuple(T tuple, HTable table)
+  {
     try {
       Append append = operationAppend(tuple);
       table.append(append);
@@ -103,8 +107,8 @@
   @Override
   public void setup(OperatorContext context)
   {
-    mode=context.getValue(OperatorContext.PROCESSING_MODE);
-    if(mode==ProcessingMode.EXACTLY_ONCE){
+    mode = context.getValue(OperatorContext.PROCESSING_MODE);
+    if (mode == ProcessingMode.EXACTLY_ONCE) {
       throw new RuntimeException("This operator only supports atmost once and atleast once processing modes");
     }
     super.setup(context);
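As the javadoc says, concrete subclasses own the append construction. A minimal hypothetical subclass, assuming an operationAppend(T) hook analogous to the operationPut(T) hook visible in the put variant below (the "rowKey,value" tuple format and the column names are invented):

    import org.apache.apex.malhar.contrib.hbase.AbstractHBaseWindowAppendOutputOperator;
    import org.apache.hadoop.hbase.client.Append;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical subclass: appends the payload of a "rowKey,value" tuple
    // to a fixed column. Family and qualifier are placeholders.
    public class StringAppendOperator extends AbstractHBaseWindowAppendOutputOperator<String>
    {
      @Override
      public Append operationAppend(String tuple)
      {
        String[] parts = tuple.split(",", 2);
        Append append = new Append(Bytes.toBytes(parts[0]));
        append.add(Bytes.toBytes("cf"), Bytes.toBytes("log"), Bytes.toBytes(parts[1]));
        return append;
      }
    }
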
diff --git a/contrib/src/main/java/com/datatorrent/contrib/hbase/AbstractHBaseWindowOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBaseWindowOutputOperator.java
similarity index 94%
rename from contrib/src/main/java/com/datatorrent/contrib/hbase/AbstractHBaseWindowOutputOperator.java
rename to contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBaseWindowOutputOperator.java
index 665b463..fb21bf9 100644
--- a/contrib/src/main/java/com/datatorrent/contrib/hbase/AbstractHBaseWindowOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBaseWindowOutputOperator.java
@@ -16,15 +16,15 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.hbase;
+package org.apache.apex.malhar.contrib.hbase;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.lib.db.AbstractPassThruTransactionableStoreOutputOperator;
 import org.apache.hadoop.hbase.client.HTable;
 
 import com.datatorrent.api.Operator;
-import com.datatorrent.lib.db.AbstractPassThruTransactionableStoreOutputOperator;
 
 /**
  * A base implementation of an AggregateTransactionableStoreOutputOperator that stores tuples in HBase
@@ -38,7 +38,7 @@
    * Both at checkpoint window and end window, flush the tuples as application window may not align with
    * end window especially when it is more than one streaming window
    */
-  
+
   private static final Logger logger = LoggerFactory.getLogger(AbstractHBaseWindowOutputOperator.class);
 
   private transient OutputAdapter<T> outputAdapter;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBaseWindowPutOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBaseWindowPutOutputOperator.java
index 70fbeeb..e6c65fc 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBaseWindowPutOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/AbstractHBaseWindowPutOutputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.hbase;
+package org.apache.apex.malhar.contrib.hbase;
 
 import java.io.IOException;
 
@@ -30,7 +30,9 @@
 import com.datatorrent.netlet.util.DTThrowable;
 
 /**
- * A base implementation of an AggregateTransactionableStoreOutputOperator operator for storing tuples in HBase rows and provides a batch put.Subclasses should provide implementation for put operation. <br>
+ * A base implementation of an AggregateTransactionableStoreOutputOperator
+ * operator that stores tuples in HBase rows and provides a batch put. Subclasses
+ * should provide an implementation for the put operation. <br>
  * <p>
  * <br>
  * This class provides a HBase output operator that can be used to store tuples
@@ -45,20 +47,20 @@
  * guarantee each tuple is written only once to HBase in case the operator is
  * restarted from an earlier checkpoint. It only tries to minimize the number of
  * duplicates limiting it to the tuples that were processed in the window when
- * the operator shutdown.
- * It supports atleast once and atmost once processing modes.
- * Exactly once is not supported
+ * the operator shut down. It supports at-least-once and at-most-once processing
+ * modes. Exactly-once is not supported.
+ *
  * @displayName Abstract HBase Window Put Output
  * @category Output
  * @tags hbase, put, transactionable, batch
- * @param <T>
- *            The tuple type
+ * @param <T> The tuple type
  * @since 1.0.2
  */
-public abstract class AbstractHBaseWindowPutOutputOperator<T> extends AbstractHBaseWindowOutputOperator<T> {
+public abstract class AbstractHBaseWindowPutOutputOperator<T> extends AbstractHBaseWindowOutputOperator<T>
+{
   private static final transient Logger logger = LoggerFactory.getLogger(AbstractHBaseWindowPutOutputOperator.class);
   private transient ProcessingMode mode;
-  
+
   @Deprecated
   public ProcessingMode getMode()
   {
@@ -71,12 +73,14 @@
     this.mode = mode;
   }
 
-  public AbstractHBaseWindowPutOutputOperator() {
+  public AbstractHBaseWindowPutOutputOperator()
+  {
     store = new HBaseWindowStore();
   }
 
   @Override
-  public void processTuple(T tuple, HTable table) {
+  public void processTuple(T tuple, HTable table)
+  {
     try {
       Put put = operationPut(tuple);
       table.put(put);
@@ -86,14 +90,13 @@
     }
   }
 
-
   public abstract Put operationPut(T t) throws IOException;
 
   @Override
   public void setup(OperatorContext context)
   {
-    mode=context.getValue(context.PROCESSING_MODE);
-    if(mode==ProcessingMode.EXACTLY_ONCE){
+    mode = context.getValue(context.PROCESSING_MODE);
+    if (mode == ProcessingMode.EXACTLY_ONCE) {
       throw new RuntimeException("This operator only supports atmost once and atleast once processing modes");
     }
     super.setup(context);
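Since setup() above rejects EXACTLY_ONCE, the processing mode has to be chosen when the DAG is assembled. A sketch using the standard Apex attribute API (operator wiring elided):

    import com.datatorrent.api.Context.OperatorContext;
    import com.datatorrent.api.DAG;
    import com.datatorrent.api.Operator;
    import com.datatorrent.api.Operator.ProcessingMode;

    public class ModeConfigSketch
    {
      // The window put/append operators support AT_LEAST_ONCE and
      // AT_MOST_ONCE only, so pin the mode explicitly.
      public static void pinAtLeastOnce(DAG dag, Operator op)
      {
        dag.setAttribute(op, OperatorContext.PROCESSING_MODE, ProcessingMode.AT_LEAST_ONCE);
      }
    }
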
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseCsvMappingPutOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseCsvMappingPutOperator.java
index 7420569..468dd8c 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseCsvMappingPutOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseCsvMappingPutOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.hbase;
+package org.apache.apex.malhar.contrib.hbase;
 
 import java.io.IOException;
 import java.io.StringReader;
@@ -25,27 +25,31 @@
 import java.util.Map;
 import java.util.Map.Entry;
 
-import org.apache.hadoop.hbase.client.Put;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.supercsv.io.CsvListReader;
 import org.supercsv.io.ICsvListReader;
 import org.supercsv.prefs.CsvPreference;
+import org.apache.apex.malhar.lib.util.ReusableStringReader;
+import org.apache.hadoop.hbase.client.Put;
 
 import com.datatorrent.api.Context.OperatorContext;
 import com.datatorrent.netlet.util.DTThrowable;
-import com.datatorrent.lib.util.ReusableStringReader;
 
 /**
- * Takes a configuration string which tells us about the position of the row, or column.&nbsp; The incoming tuples are inserted accordingly.
+ * Takes a configuration string which specifies the position of the row key and
+ * the columns.&nbsp; The incoming tuples are inserted accordingly.
  * <p>
+ *
  * @displayName HBase Csv Mapping Put
  * @category Output
  * @tags hbase, csv, put, String
  * @since 1.0.4
  */
-public class HBaseCsvMappingPutOperator extends AbstractHBaseWindowPutOutputOperator<String> {
-  private class ColDef {
+public class HBaseCsvMappingPutOperator extends AbstractHBaseWindowPutOutputOperator<String>
+{
+  private class ColDef
+  {
     String colFam;
     String colName;
   }
@@ -58,23 +62,26 @@
   private transient ArrayList<String> csvLineList = new ArrayList<String>();
   private String mappingString;
 
-  public void setMappingString(String mappingString) {
+  public void setMappingString(String mappingString)
+  {
     this.mappingString = mappingString;
   }
 
   @Override
-  public Put operationPut(String t) throws IOException {
+  public Put operationPut(String t) throws IOException
+  {
     return parseLine(t);
   }
 
-  public void parseMapping() {
+  public void parseMapping()
+  {
     ICsvListReader listReader = null;
     StringReader sr = null;
     ArrayList<String> csvList = new ArrayList<String>();
     try {
       sr = new StringReader(mappingString);
       listReader = new CsvListReader(sr,CsvPreference.STANDARD_PREFERENCE);
-      csvList = (ArrayList<String>) listReader.read();
+      csvList = (ArrayList<String>)listReader.read();
     } catch (IOException e) {
       logger.error("Cannot read the mapping string", e);
       DTThrowable.rethrow(e);
@@ -89,9 +96,9 @@
     }
     for (int index = 0; index < csvList.size(); index++) {
       String value = csvList.get(index);
-      if (value.equals("row"))
+      if (value.equals("row")) {
         rowIndex = index;
-      else {
+      } else {
         ColDef c = new ColDef();
         c.colFam = value.substring(0, value.indexOf('.'));
         c.colName = value.substring(value.indexOf('.') + 1);
@@ -100,11 +107,12 @@
     }
   }
 
-  public Put parseLine(String s) {
+  public Put parseLine(String s)
+  {
     Put put = null;
     try {
       lineSr.open(s);
-      csvLineList = (ArrayList<String>) lineListReader.read();
+      csvLineList = (ArrayList<String>)lineListReader.read();
     } catch (IOException e) {
       logger.error("Cannot read the property string", e);
       DTThrowable.rethrow(e);
@@ -120,15 +128,17 @@
   }
 
   @Override
-  public void setup(OperatorContext context) {
+  public void setup(OperatorContext context)
+  {
     super.setup(context);
     parseMapping();
     lineListReader = new CsvListReader(lineSr,
-        CsvPreference.STANDARD_PREFERENCE);
+    CsvPreference.STANDARD_PREFERENCE);
   }
 
   @Override
-  public void teardown() {
+  public void teardown()
+  {
     super.teardown();
     try {
       lineSr.close();
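For reference while reading parseMapping(): the mapping string is itself CSV, with the literal token row marking the row-key position and every other entry of the form columnFamily.columnName. A configuration sketch using the setMappingString property shown above (mapping values are placeholders):

    import org.apache.apex.malhar.contrib.hbase.HBaseCsvMappingPutOperator;

    public class CsvMappingConfigSketch
    {
      public static HBaseCsvMappingPutOperator configure()
      {
        HBaseCsvMappingPutOperator op = new HBaseCsvMappingPutOperator();
        // "row" marks the row-key position; other entries are columnFamily.columnName
        op.setMappingString("row,address.city,address.state");
        // a tuple such as "john,fremont,california" then becomes a Put on row
        // "john" with address:city=fremont and address:state=california
        return op;
      }
    }
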
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseFieldInfo.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseFieldInfo.java
index 6a34a91..23fa2c5 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseFieldInfo.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseFieldInfo.java
@@ -16,96 +16,83 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.hbase;
+package org.apache.apex.malhar.contrib.hbase;
 
+import org.apache.apex.malhar.lib.util.FieldInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 
-import com.datatorrent.lib.util.FieldInfo;
-
 /**
  * @since 3.1.0
  */
 
 public class HBaseFieldInfo extends FieldInfo
 {
-	private String familyName;
+  private String familyName;
 
-	public HBaseFieldInfo()
-	{
-	}
+  public HBaseFieldInfo()
+  {
+  }
 
-	public HBaseFieldInfo( String columnName, String columnExpression, SupportType type, String familyName )
-	{
-	  super( columnName, columnExpression, type );
-	  setFamilyName( familyName );
-	}
+  public HBaseFieldInfo( String columnName, String columnExpression, SupportType type, String familyName )
+  {
+    super( columnName, columnExpression, type );
+    setFamilyName( familyName );
+  }
 
-	public String getFamilyName()
-	{
-		return familyName;
-	}
+  public String getFamilyName()
+  {
+    return familyName;
+  }
 
-	public void setFamilyName(String familyName)
-	{
-		this.familyName = familyName;
-	}
+  public void setFamilyName(String familyName)
+  {
+    this.familyName = familyName;
+  }
 
-	public byte[] toBytes( Object value )
-	{
-		final SupportType type = getType();
-		switch( type )
-		{
-		case BOOLEAN:
-		  return Bytes.toBytes( (Boolean)value );
-
-		case SHORT:
-		  return Bytes.toBytes( (Short)value );
-
-		case INTEGER:
-		  return Bytes.toBytes( (Integer)value );
-
-		case LONG:
-		  return Bytes.toBytes( (Long)value );
-
-		case FLOAT:
-		  return Bytes.toBytes( (Float)value );
-
-		case DOUBLE:
-		  return Bytes.toBytes( (Double)value );
-
-		case STRING:
-		  return Bytes.toBytes( (String)value );
-		}
-		throw new IllegalArgumentException( "Unsupported type: " + type );
-	}
-
-	public Object toValue( byte[] bytes )
-	{
+  public byte[] toBytes( Object value )
+  {
     final SupportType type = getType();
-    switch( type )
-    {
-    case BOOLEAN:
-      return Bytes.toBoolean( bytes );
-
-    case SHORT:
-      return Bytes.toShort( bytes );
-
-    case INTEGER:
-      return Bytes.toInt( bytes );
-
-    case LONG:
-      return Bytes.toLong( bytes );
-
-    case FLOAT:
-      return Bytes.toFloat( bytes );
-
-    case DOUBLE:
-      return Bytes.toDouble( bytes );
-
-    case STRING:
-      return Bytes.toString( bytes );
+    switch (type) {
+      case BOOLEAN:
+        return Bytes.toBytes( (Boolean)value );
+      case SHORT:
+        return Bytes.toBytes( (Short)value );
+      case INTEGER:
+        return Bytes.toBytes( (Integer)value );
+      case LONG:
+        return Bytes.toBytes( (Long)value );
+      case FLOAT:
+        return Bytes.toBytes( (Float)value );
+      case DOUBLE:
+        return Bytes.toBytes( (Double)value );
+      case STRING:
+        return Bytes.toBytes( (String)value );
+      default:
+        throw new IllegalArgumentException("Unsupported type: " + type);
     }
-    throw new IllegalArgumentException( "Unsupported type: " + type );
+  }
+
+  public Object toValue( byte[] bytes )
+  {
+    final SupportType type = getType();
+    switch (type) {
+      case BOOLEAN:
+        return Bytes.toBoolean( bytes );
+      case SHORT:
+        return Bytes.toShort( bytes );
+      case INTEGER:
+        return Bytes.toInt( bytes );
+      case LONG:
+        return Bytes.toLong( bytes );
+      case FLOAT:
+        return Bytes.toFloat( bytes );
+      case DOUBLE:
+        return Bytes.toDouble( bytes );
+      case STRING:
+        return Bytes.toString( bytes );
+      default:
+        throw new IllegalArgumentException("Unsupported type: " + type);
+    }
   }
 
 }
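HBaseFieldInfo round-trips typed values through the HBase Bytes utilities, as the switches above show. A small usage sketch (column name, expression and family are invented):

    import org.apache.apex.malhar.contrib.hbase.HBaseFieldInfo;
    import org.apache.apex.malhar.lib.util.FieldInfo.SupportType;

    public class FieldInfoSketch
    {
      public static void main(String[] args)
      {
        HBaseFieldInfo info = new HBaseFieldInfo("age", "age", SupportType.INTEGER, "details");
        byte[] raw = info.toBytes(42);          // serialized via Bytes.toBytes(Integer)
        System.out.println(info.toValue(raw));  // prints 42
      }
    }
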
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseFieldValueGenerator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseFieldValueGenerator.java
index fea56a0..a49399e 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseFieldValueGenerator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseFieldValueGenerator.java
@@ -16,12 +16,12 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.hbase;
+package org.apache.apex.malhar.contrib.hbase;
 
 import java.util.List;
 
-import com.datatorrent.lib.util.FieldValueGenerator;
-import com.datatorrent.lib.util.PojoUtils;
+import org.apache.apex.malhar.lib.util.FieldValueGenerator;
+import org.apache.apex.malhar.lib.util.PojoUtils;
 
 /**
  * A {@link FieldValueGenerator} implementation for {@link HBaseFieldInfo}
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseGetOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseGetOperator.java
index 37270d5..12608c8 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseGetOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseGetOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.hbase;
+package org.apache.apex.malhar.contrib.hbase;
 
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Get;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseInputOperator.java
index 6f11621..82e50b4 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseInputOperator.java
@@ -16,10 +16,11 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.hbase;
+package org.apache.apex.malhar.contrib.hbase;
+
+import org.apache.apex.malhar.lib.db.AbstractStoreInputOperator;
 
 import com.datatorrent.api.InputOperator;
-import com.datatorrent.lib.db.AbstractStoreInputOperator;
 
 /**
 * A base implementation of a HBase input operator which derives from HBaseOperatorBase. <br>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseNameValueCsvPutOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseNameValueCsvPutOperator.java
index d35281c..5fd97e1 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseNameValueCsvPutOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseNameValueCsvPutOperator.java
@@ -16,45 +16,48 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.hbase;
+package org.apache.apex.malhar.contrib.hbase;
 
-import com.datatorrent.api.Context.OperatorContext;
-import com.datatorrent.netlet.util.DTThrowable;
-import com.datatorrent.lib.util.ReusableStringReader;
-import org.apache.hadoop.hbase.client.Put;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.supercsv.io.CsvListReader;
-import org.supercsv.io.ICsvListReader;
-import org.supercsv.prefs.CsvPreference;
-
-import javax.validation.constraints.NotNull;
 import java.io.IOException;
 import java.io.StringReader;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Map;
+import javax.validation.constraints.NotNull;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.supercsv.io.CsvListReader;
+import org.supercsv.io.ICsvListReader;
+import org.supercsv.prefs.CsvPreference;
+import org.apache.apex.malhar.lib.util.ReusableStringReader;
+import org.apache.hadoop.hbase.client.Put;
+
+import com.datatorrent.api.Context.OperatorContext;
+import com.datatorrent.netlet.util.DTThrowable;
 
 /**
- * An implementation of HBase window put operator that inserts a string of key value pairs which are mapped to corresponding rows, columns.
+ * An implementation of HBase window put operator that inserts a string of key
+ * value pairs which are mapped to corresponding rows, columns.
  * <p>
 * Accepts a string of key value pairs containing the data to be inserted. These
 * are mapped to corresponding rows, column families and columns using a property
 * file and inserted into HBase. Example: the input string will be of the form
- * name="xyz", st="patrickhenry" ,ct="fremont", sa="california", the properties will contain
- * properties of form name=row, sa=address.street, ct=address.city, sa=address.state.
- * With the above mapping a row xyz is created. The value patrickhenry is inserted into
- * columnfamily address and column street of row xyz. Other values are inserted
- * similarly.
+ * name="xyz", st="patrickhenry" ,ct="fremont", sa="california", the properties
+ * will contain properties of form name=row, sa=address.street, ct=address.city,
+ * sa=address.state. With the above mapping a row xyz is created. The value
+ * patrickhenry is inserted into columnfamily address and column street of row
+ * xyz. Other values are inserted similarly.
  *
  * @displayName HBase Name Value Csv Put
  * @category Output
  * @tags csv, hbase, put
  * @since 1.0.2
  */
-public class HBaseNameValueCsvPutOperator extends AbstractHBaseWindowPutOutputOperator<String> {
-
-  private class ColDef {
+public class HBaseNameValueCsvPutOperator extends AbstractHBaseWindowPutOutputOperator<String>
+{
+  private class ColDef
+  {
     String colFam;
     String colName;
   }
@@ -66,26 +69,29 @@
   private transient Map<String, ColDef> colMap = new HashMap<String, ColDef>();
   private transient Map<String, String> linemap = new HashMap<String, String>();
   private transient ICsvListReader lineListReader = null;
-  private transient ReusableStringReader lineSr=new ReusableStringReader();
+  private transient ReusableStringReader lineSr = new ReusableStringReader();
   private transient ArrayList<String> csvLineList = new ArrayList<String>();
 
-  public void setMapping(String mapping) {
+  public void setMapping(String mapping)
+  {
     this.mapping = mapping;
   }
 
   @Override
-  public Put operationPut(String t) {
+  public Put operationPut(String t)
+  {
     return parseLine(t);
   }
 
-  public void parseMapping() {
+  public void parseMapping()
+  {
     ICsvListReader listReader = null;
-    StringReader sr=null;
+    StringReader sr = null;
     ArrayList<String> csvList = new ArrayList<String>();
     try {
-      sr=new StringReader(mapping);
+      sr = new StringReader(mapping);
       listReader = new CsvListReader(sr,CsvPreference.STANDARD_PREFERENCE);
-      csvList = (ArrayList<String>) listReader.read();
+      csvList = (ArrayList<String>)listReader.read();
     } catch (IOException e) {
       logger.error("Cannot read the mapping string", e);
       DTThrowable.rethrow(e);
@@ -110,11 +116,12 @@
     }
   }
 
-  public Put parseLine(String s) {
+  public Put parseLine(String s)
+  {
     Put put = null;
     try {
       lineSr.open(s);
-      csvLineList = (ArrayList<String>) lineListReader.read();
+      csvLineList = (ArrayList<String>)lineListReader.read();
     } catch (IOException e) {
       logger.error("Cannot read the property string", e);
       DTThrowable.rethrow(e);
@@ -144,14 +151,16 @@
   }
 
   @Override
-  public void setup(OperatorContext context) {
+  public void setup(OperatorContext context)
+  {
     super.setup(context);
     parseMapping();
     lineListReader = new CsvListReader(lineSr,CsvPreference.STANDARD_PREFERENCE);
   }
 
   @Override
-  public void teardown() {
+  public void teardown()
+  {
     super.teardown();
     try {
       lineSr.close();
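Restating the javadoc example as configuration, using the setMapping property shown above:

    import org.apache.apex.malhar.contrib.hbase.HBaseNameValueCsvPutOperator;

    public class NameValueCsvConfigSketch
    {
      public static HBaseNameValueCsvPutOperator configure()
      {
        HBaseNameValueCsvPutOperator op = new HBaseNameValueCsvPutOperator();
        // keys map either to the row ("name=row") or to columnFamily.columnName
        op.setMapping("name=row,st=address.street,ct=address.city,sa=address.state");
        // an input tuple name="xyz",st="patrickhenry",ct="fremont",sa="california"
        // puts street/city/state into the address column family of row xyz
        return op;
      }
    }
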
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseOperatorBase.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseOperatorBase.java
index 8bed824..04a51b4 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseOperatorBase.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseOperatorBase.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.hbase;
+package org.apache.apex.malhar.contrib.hbase;
 
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
@@ -103,7 +103,8 @@
    * Get the configuration.
    * @return The configuration
    */
-  public Configuration getConfiguration() {
+  public Configuration getConfiguration()
+  {
     return configuration;
   }
 
@@ -113,7 +114,8 @@
    * should be called before using the configuration or table.
    * @throws IOException
    */
-  protected void setupConfiguration() throws IOException {
+  protected void setupConfiguration() throws IOException
+  {
     configuration = HBaseConfiguration.create();
     configuration.set("hbase.zookeeper.quorum", zookeeperQuorum);
     configuration.set("hbase.zookeeper.property.clientPort", "" + zookeeperClientPort);
@@ -127,7 +129,8 @@
    * @return The HBase table if configuration setup was successful, null otherwise
    * @throws IOException
    */
-  protected HTable getTable() throws IOException {
+  protected HTable getTable() throws IOException
+  {
     return table;
   }
 
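setupConfiguration() above is the stock HBase client bootstrap. An equivalent stand-alone sketch, with placeholder connection values:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;

    public class HBaseBootstrapSketch
    {
      public static HTable open() throws IOException
      {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "localhost");
        conf.set("hbase.zookeeper.property.clientPort", "2181");
        // HTable is the pre-1.0 client API used throughout this module
        return new HTable(conf, "events");
      }
    }
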
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBasePOJOInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBasePOJOInputOperator.java
index e459ec7..cb36dbf 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBasePOJOInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBasePOJOInputOperator.java
@@ -16,12 +16,18 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.hbase;
+package org.apache.apex.malhar.contrib.hbase;
 
 import java.util.List;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
+import org.apache.apex.malhar.lib.util.FieldValueGenerator;
+import org.apache.apex.malhar.lib.util.FieldValueGenerator.ValueConverter;
+import org.apache.apex.malhar.lib.util.PojoUtils;
+import org.apache.apex.malhar.lib.util.PojoUtils.Setter;
+import org.apache.apex.malhar.lib.util.TableInfo;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
@@ -29,14 +35,8 @@
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
 
-import com.datatorrent.lib.util.FieldValueGenerator;
-import com.datatorrent.lib.util.FieldValueGenerator.ValueConverter;
-import com.datatorrent.lib.util.PojoUtils;
-import com.datatorrent.lib.util.PojoUtils.Setter;
-import com.datatorrent.lib.util.TableInfo;
 import com.datatorrent.api.Context;
 import com.datatorrent.api.DefaultOutputPort;
-import com.datatorrent.api.Context.OperatorContext;
 
 /**
  * HBasePOJOInputOperator reads data from a HBase store, converts it to a POJO and puts it on the output port.
@@ -96,20 +96,20 @@
   {
     try {
       String readRow = Bytes.toString(result.getRow());
-      if( readRow.equals( getLastReadRow() )) {
+      if ( readRow.equals( getLastReadRow() )) {
         return null;
       }
 
       Object instance = pojoType.newInstance();
       rowSetter.set(instance, readRow);
 
-       List<Cell> cells = result.listCells();
-       for (Cell cell : cells) {
-         String columnName = Bytes.toString(CellUtil.cloneQualifier(cell));
-         String columnFamily = Bytes.toString(CellUtil.cloneFamily(cell));
+      List<Cell> cells = result.listCells();
+      for (Cell cell : cells) {
+        String columnName = Bytes.toString(CellUtil.cloneQualifier(cell));
+        String columnFamily = Bytes.toString(CellUtil.cloneFamily(cell));
         byte[] value = CellUtil.cloneValue(cell);
-         ((HBaseFieldValueGenerator)fieldValueGenerator).setColumnValue(instance, columnName, columnFamily, value,
-             valueConverter);
+        ((HBaseFieldValueGenerator)fieldValueGenerator).setColumnValue(instance, columnName, columnFamily, value,
+            valueConverter);
       }
 
       setLastReadRow(readRow);
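The loop above fills the target type through a row setter plus PojoUtils-generated field setters, so the POJO only needs conventional accessors. A hypothetical target shape:

    // Hypothetical POJO target: the row key lands in "name" via the row setter,
    // and each cell is routed by column family/qualifier to a matching field.
    public class PersonRow
    {
      private String name;
      private String city;

      public String getName() { return name; }
      public void setName(String name) { this.name = name; }
      public String getCity() { return city; }
      public void setCity(String city) { this.city = city; }
    }
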
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBasePOJOPutOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBasePOJOPutOperator.java
index 37c962b..d70f3c7 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBasePOJOPutOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBasePOJOPutOperator.java
@@ -16,20 +16,19 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.hbase;
+package org.apache.apex.malhar.contrib.hbase;
 
 import java.util.List;
 
+import org.apache.apex.malhar.lib.util.FieldValueGenerator;
+import org.apache.apex.malhar.lib.util.FieldValueGenerator.FieldValueHandler;
+import org.apache.apex.malhar.lib.util.PojoUtils;
+import org.apache.apex.malhar.lib.util.PojoUtils.Getter;
+import org.apache.apex.malhar.lib.util.TableInfo;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.util.Bytes;
 
-import com.datatorrent.lib.util.FieldValueGenerator;
-import com.datatorrent.lib.util.FieldValueGenerator.FieldValueHandler;
-import com.datatorrent.lib.util.PojoUtils;
-import com.datatorrent.lib.util.PojoUtils.Getter;
-import com.datatorrent.lib.util.TableInfo;
-
 /**
  * @displayName HBase Output Operator
  * @category Output
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseScanOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseScanOperator.java
index b694e67..f525058 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseScanOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseScanOperator.java
@@ -16,36 +16,38 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.hbase;
+package org.apache.apex.malhar.contrib.hbase;
 
 import java.io.IOException;
 import java.util.Queue;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.Scan;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
-import com.datatorrent.api.AutoMetric;
-import com.datatorrent.api.Context;
-import com.datatorrent.api.Operator;
-import com.datatorrent.api.Context.OperatorContext;
-import com.google.common.collect.Queues;
-
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+
+import com.google.common.collect.Queues;
+import com.datatorrent.api.AutoMetric;
+import com.datatorrent.api.Context;
+import com.datatorrent.api.Context.OperatorContext;
+import com.datatorrent.api.Operator;
 
 /**
- * A base implementation of hbase input operator that retrieves tuples from HBase columns and provides scan operation. <br>
+ * A base implementation of a HBase input operator that retrieves tuples from
+ * HBase columns and provides scan operation. <br>
  * <p>
  * <br>
- * This class provides a HBase input operator that can be used to retrieve tuples from rows in a
- * HBase table. The class should be extended by the end-operator developer. The extending class should
- * implement operationScan and getTuple methods. The operationScan method should provide a HBase Scan
- * metric object that specifies where to retrieve the tuple information from the table. The getTuple method
- * should map the contents of a Result from the Scan result to a tuple.<br>
+ * This class provides a HBase input operator that can be used to retrieve
+ * tuples from rows in a HBase table. The class should be extended by the
+ * end-operator developer. The extending class should implement operationScan
+ * and getTuple methods. The operationScan method should provide a HBase Scan
+ * object that specifies where to retrieve the tuple information from the
+ * table. The getTuple method should map the contents of a Result from the Scan
+ * result to a tuple.<br>
  *
  * <br>
+ *
  * @displayName HBase Scan
 * @category Input
  * @tags hbase, scan, input operator
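Per the javadoc contract, an end-operator supplies operationScan and getTuple. A sketch against a single column (the hook names come from the javadoc, but their exact signatures and visibility are assumed; family and qualifier are placeholders):

    import org.apache.apex.malhar.contrib.hbase.HBaseScanOperator;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class NameScanOperator extends HBaseScanOperator<String>
    {
      @Override
      public Scan operationScan()
      {
        Scan scan = new Scan();
        scan.addColumn(Bytes.toBytes("details"), Bytes.toBytes("name"));
        return scan;
      }

      @Override
      public String getTuple(Result result)
      {
        // map one cell of the scanned row onto a String tuple
        return Bytes.toString(result.getValue(Bytes.toBytes("details"), Bytes.toBytes("name")));
      }
    }
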
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseStore.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseStore.java
index 2b0ffa7..d46bc6f 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseStore.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseStore.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.hbase;
+package org.apache.apex.malhar.contrib.hbase;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -27,6 +27,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.lib.db.Connectable;
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -41,7 +42,6 @@
 import com.google.common.cache.RemovalListener;
 import com.google.common.cache.RemovalNotification;
 
-import com.datatorrent.lib.db.Connectable;
 /**
 * A {@link Connectable} implementation that uses HBase to connect to stores.
  * <p>
@@ -50,15 +50,15 @@
  * @tags store, hbase
  * @since 1.0.2
  */
-public class HBaseStore implements Connectable {
-
+public class HBaseStore implements Connectable
+{
   public static final String USER_NAME_SPECIFIER = "%USER_NAME%";
 
   private static final Logger logger = LoggerFactory.getLogger(HBaseStore.class);
 
   private String zookeeperQuorum;
   private int zookeeperClientPort;
-  
+
   // Default table name if specified
   protected String tableName;
 
@@ -67,7 +67,7 @@
   // Default interval 30 min
   protected long reloginCheckInterval = 30 * 60 * 1000;
   protected transient Thread loginRenewer;
-  private volatile transient boolean doRelogin;
+  private transient volatile boolean doRelogin;
 
   protected transient HTable table;
   // Multi - table
@@ -76,13 +76,14 @@
 
   @Min(1)
   protected int maxOpenTables = Integer.MAX_VALUE;
-  
+
   /**
    * Get the zookeeper quorum location.
    *
    * @return The zookeeper quorum location
    */
-  public String getZookeeperQuorum() {
+  public String getZookeeperQuorum()
+  {
     return zookeeperQuorum;
   }
 
@@ -92,7 +93,8 @@
    * @param zookeeperQuorum
    *            The zookeeper quorum location
    */
-  public void setZookeeperQuorum(String zookeeperQuorum) {
+  public void setZookeeperQuorum(String zookeeperQuorum)
+  {
     this.zookeeperQuorum = zookeeperQuorum;
   }
 
@@ -101,7 +103,8 @@
    *
    * @return The zookeeper client port
    */
-  public int getZookeeperClientPort() {
+  public int getZookeeperClientPort()
+  {
     return zookeeperClientPort;
   }
 
@@ -111,7 +114,8 @@
    * @param zookeeperClientPort
    *            The zookeeper client port
    */
-  public void setZookeeperClientPort(int zookeeperClientPort) {
+  public void setZookeeperClientPort(int zookeeperClientPort)
+  {
     this.zookeeperClientPort = zookeeperClientPort;
   }
 
@@ -120,7 +124,8 @@
    *
    * @return The HBase table name
    */
-  public String getTableName() {
+  public String getTableName()
+  {
     return tableName;
   }
 
@@ -130,7 +135,8 @@
    * @param tableName
    *            The HBase table name
    */
-  public void setTableName(String tableName) {
+  public void setTableName(String tableName)
+  {
     this.tableName = tableName;
   }
 
@@ -199,8 +205,8 @@
 
   /**
    * Gets the allowedTableNames
-   * 
-   * @return  allowedTableNames 
+   *
+   * @return  allowedTableNames
    */
   public String[] getAllowedTableNames()
   {
@@ -211,14 +217,15 @@
   {
     this.allowedTableNames = allowedTableNames;
   }
-  
+
   /**
    * Get the HBase table .
    *
    * @return The HBase table
    * @omitFromUI
    */
-  public HTable getTable() {
+  public HTable getTable()
+  {
     return table;
   }
 
@@ -230,16 +237,18 @@
    * @return The HBase table
    * @omitFromUI
    */
-  public HTable getTable(String tableName) {
-    if ((tableName == null) || tableName.equals(this.tableName))
+  public HTable getTable(String tableName)
+  {
+    if ((tableName == null) || tableName.equals(this.tableName)) {
       return getTable();
+    }
     try {
       return tableCache.get(tableName);
     } catch (Exception e) {
       throw Throwables.propagate(e);
     }
   }
-  
+
   public void flushTables() throws InterruptedIOException, RetriesExhaustedWithDetailsException
   {
     if (table != null) {
@@ -249,7 +258,7 @@
       flushTable(entry.getValue());
     }
   }
-  
+
   protected void flushTable(HTable table) throws InterruptedIOException, RetriesExhaustedWithDetailsException
   {
     table.flushCommits();
@@ -260,7 +269,8 @@
    *
    * @return The configuration
    */
-  public Configuration getConfiguration() {
+  public Configuration getConfiguration()
+  {
     return configuration;
   }
 
@@ -270,14 +280,16 @@
    * @param configuration
    *            The configuration
    */
-  public void setConfiguration(Configuration configuration) {
+  public void setConfiguration(Configuration configuration)
+  {
     this.configuration = configuration;
   }
 
   protected transient Configuration configuration;
 
   @Override
-  public void connect() throws IOException {
+  public void connect() throws IOException
+  {
     if ((principal != null) && (keytabPath != null)) {
       String lprincipal = evaluateProperty(principal);
       String lkeytabPath = evaluateProperty(keytabPath);
@@ -317,12 +329,12 @@
     if (zookeeperClientPort != 0) {
       configuration.set("hbase.zookeeper.property.clientPort", "" + zookeeperClientPort);
     }
-    
+
     // Connect to default table if specified
     if (tableName != null) {
       table = connectTable(tableName);
     }
-    
+
     CacheLoader<String, HTable> cacheLoader = new CacheLoader<String, HTable>()
     {
       @Override
@@ -331,7 +343,7 @@
         return loadTable(key);
       }
     };
-    
+
     RemovalListener<String, HTable> removalListener = new RemovalListener<String, HTable>()
     {
       @Override
@@ -340,9 +352,9 @@
         unloadTable(notification.getValue());
       }
     };
-    
+
     int maxCacheSize = (tableName == null) ? maxOpenTables : (maxOpenTables - 1);
-    
+
     tableCache = CacheBuilder.<String, HTable>newBuilder().maximumSize(maxCacheSize).removalListener(removalListener).build(cacheLoader);
   }
 
@@ -362,7 +374,7 @@
       logger.warn("Could not close table", e);
     }
   }
-  
+
   protected HTable connectTable(String tableName) throws IOException
   {
     HTable table = new HTable(configuration, tableName);
@@ -373,13 +385,14 @@
   private String evaluateProperty(String property) throws IOException
   {
     if (property.contains(USER_NAME_SPECIFIER)) {
-     property = property.replaceAll(USER_NAME_SPECIFIER, UserGroupInformation.getLoginUser().getShortUserName());
+      property = property.replaceAll(USER_NAME_SPECIFIER, UserGroupInformation.getLoginUser().getShortUserName());
     }
     return property;
   }
 
   @Override
-  public void disconnect() throws IOException {
+  public void disconnect() throws IOException
+  {
     if (loginRenewer != null) {
       doRelogin = false;
       loginRenewer.interrupt();
@@ -392,7 +405,8 @@
   }
 
   @Override
-  public boolean isConnected() {
+  public boolean isConnected()
+  {
     // not applicable to hbase
     return false;
   }
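
For context on the hunks above: connect() now builds a bounded Guava LoadingCache of HTable handles, loading tables on demand and closing them from a RemovalListener when they are evicted, so at most maxOpenTables connections stay open. A standalone sketch of that pattern, with a hypothetical Handle class standing in for HTable and an arbitrary cache size of 10 (both assumptions, not part of the operator):

import java.util.concurrent.ExecutionException;

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;

public class BoundedHandleCacheSketch
{
  // Hypothetical stand-in for an HTable connection
  static class Handle
  {
    final String name;

    Handle(String name)
    {
      this.name = name;
    }

    void close()
    {
      // release the underlying connection, as unloadTable() does in the diff
    }
  }

  public static void main(String[] args) throws ExecutionException
  {
    RemovalListener<String, Handle> removalListener = new RemovalListener<String, Handle>()
    {
      @Override
      public void onRemoval(RemovalNotification<String, Handle> notification)
      {
        notification.getValue().close();
      }
    };
    LoadingCache<String, Handle> tableCache = CacheBuilder.newBuilder()
        .maximumSize(10)
        .removalListener(removalListener)
        .build(new CacheLoader<String, Handle>()
        {
          @Override
          public Handle load(String key)
          {
            return new Handle(key); // connectTable() in the operator
          }
        });
    System.out.println(tableCache.get("t1").name);
  }
}
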
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseWindowStore.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseWindowStore.java
index 2c9f539..38d6195 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseWindowStore.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/HBaseWindowStore.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.hbase;
+package org.apache.apex.malhar.contrib.hbase;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -24,6 +24,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.lib.db.TransactionableStore;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
@@ -36,8 +37,8 @@
 
 import com.google.common.base.Throwables;
 
-import com.datatorrent.lib.db.TransactionableStore;
 import com.datatorrent.netlet.util.DTThrowable;
+
 /**
  * <p>Provides transaction support to the operators by implementing TransactionableStore abstract methods. </p>
  * <p>
@@ -49,7 +50,8 @@
  * @tags store, transactional
  * @since 1.0.2
  */
-public class HBaseWindowStore extends HBaseStore implements TransactionableStore {
+public class HBaseWindowStore extends HBaseStore implements TransactionableStore
+{
   private static final transient Logger logger = LoggerFactory.getLogger(HBaseWindowStore.class);
   private static final String DEFAULT_ROW_NAME = "HBaseOperator_row";
   private static final String DEFAULT_COLUMN_FAMILY_NAME = "HBaseOutputOperator_cf";
@@ -63,14 +65,16 @@
   private transient String lastWindowColumnName;
   private transient byte[] lastWindowColumnBytes;
 
-  public HBaseWindowStore() {
+  public HBaseWindowStore()
+  {
     rowName = DEFAULT_ROW_NAME;
     columnFamilyName = DEFAULT_COLUMN_FAMILY_NAME;
     lastWindowColumnName = DEFAULT_LAST_WINDOW_PREFIX_COLUMN_NAME;
     constructKeys();
   }
 
-  private void constructKeys() {
+  private void constructKeys()
+  {
     rowBytes = Bytes.toBytes(rowName);
     columnFamilyBytes = Bytes.toBytes(columnFamilyName);
   }
@@ -80,7 +84,8 @@
    *
    * @return The row name
    */
-  public String getRowName() {
+  public String getRowName()
+  {
     return rowName;
   }
 
@@ -90,7 +95,8 @@
    * @param rowName
    *            The row name
    */
-  public void setRowName(String rowName) {
+  public void setRowName(String rowName)
+  {
     this.rowName = rowName;
     constructKeys();
   }
@@ -100,7 +106,8 @@
    *
    * @return The column family name
    */
-  public String getColumnFamilyName() {
+  public String getColumnFamilyName()
+  {
     return columnFamilyName;
   }
 
@@ -110,13 +117,15 @@
    * @param columnFamilyName
    *            The column family name
    */
-  public void setColumnFamilyName(String columnFamilyName) {
+  public void setColumnFamilyName(String columnFamilyName)
+  {
     this.columnFamilyName = columnFamilyName;
     constructKeys();
   }
 
   @Override
-  public void connect() throws IOException {
+  public void connect() throws IOException
+  {
     super.connect();
     HTableDescriptor tdesc = table.getTableDescriptor();
     if (!tdesc.hasFamily(columnFamilyBytes)) {
@@ -133,13 +142,14 @@
   }
 
   @Override
-  public void beginTransaction() {
+  public void beginTransaction()
+  {
     // HBase does not support transactions so this method is left empty
-
   }
 
   @Override
-  public void commitTransaction() {
+  public void commitTransaction()
+  {
     try {
       flushTables();
     } catch (InterruptedIOException | RetriesExhaustedWithDetailsException e) {
@@ -148,19 +158,21 @@
   }
 
   @Override
-  public void rollbackTransaction() {
+  public void rollbackTransaction()
+  {
     // HBase does not support transactions so this method is left empty
-
   }
 
   @Override
-  public boolean isInTransaction() {
+  public boolean isInTransaction()
+  {
     // HBase does not support transactions so this method is left empty
     return false;
   }
 
   @Override
-  public long getCommittedWindowId(String appId, int operatorId) {
+  public long getCommittedWindowId(String appId, int operatorId)
+  {
     byte[] value = null;
     try {
       String columnKey = appId + "_" + operatorId + "_" + lastWindowColumnName;
@@ -183,12 +195,14 @@
     if (value != null) {
       long longval = Bytes.toLong(value);
       return longval;
-    } else
+    } else {
       return -1;
+    }
   }
 
   @Override
-  public void storeCommittedWindowId(String appId, int operatorId,long windowId) {
+  public void storeCommittedWindowId(String appId, int operatorId, long windowId)
+  {
     byte[] WindowIdBytes = Bytes.toBytes(windowId);
     String columnKey = appId + "_" + operatorId + "_" + lastWindowColumnName;
     lastWindowColumnBytes = Bytes.toBytes(columnKey);
@@ -206,7 +220,8 @@
   }
 
   @Override
-  public void removeCommittedWindowId(String appId, int operatorId) {
+  public void removeCommittedWindowId(String appId, int operatorId)
+  {
     // Not applicable to hbase
   }
 
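
The getCommittedWindowId()/storeCommittedWindowId() pair above gives idempotent operators their recovery point: the last fully processed window id is kept as an 8-byte value under a column key of the form appId_operatorId_lastWindowColumnName, with -1 meaning nothing has been committed yet. The round trip can be sketched without HBase (the in-memory map and the "lw" suffix are placeholders for the row and the column name):

import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;

public class WindowIdStoreSketch
{
  // Stands in for the HBase row written by the store
  private final Map<String, byte[]> column = new HashMap<String, byte[]>();

  public void storeCommittedWindowId(String appId, int operatorId, long windowId)
  {
    String columnKey = appId + "_" + operatorId + "_" + "lw";
    column.put(columnKey, ByteBuffer.allocate(8).putLong(windowId).array());
  }

  public long getCommittedWindowId(String appId, int operatorId)
  {
    byte[] value = column.get(appId + "_" + operatorId + "_" + "lw");
    return (value == null) ? -1 : ByteBuffer.wrap(value).getLong();
  }
}
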
diff --git a/contrib/src/main/java/com/datatorrent/contrib/hbase/OutputAdapter.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/OutputAdapter.java
similarity index 97%
rename from contrib/src/main/java/com/datatorrent/contrib/hbase/OutputAdapter.java
rename to contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/OutputAdapter.java
index 31317bd..e503ebb 100644
--- a/contrib/src/main/java/com/datatorrent/contrib/hbase/OutputAdapter.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/OutputAdapter.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.hbase;
+package org.apache.apex.malhar.contrib.hbase;
 
 import java.io.InterruptedIOException;
 
@@ -75,7 +75,9 @@
   interface Driver<T>
   {
     void processTuple(T tuple, HTable table);
+
     String getTableName(T tuple);
+
     void errorTuple(T tuple);
   }
 
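
The Driver contract above (now with blank lines between the methods, per checkstyle) is the hook concrete HBase output adapters implement: route a tuple to a table, name the table for a tuple, and handle failures. A toy implementation of the same shape, with the HBase table type parameterized away so the sketch is self-contained:

public class DriverSketch
{
  // Local copy of the Driver shape with the HBase type replaced by a type parameter
  interface Driver<T, TABLE>
  {
    void processTuple(T tuple, TABLE table);

    String getTableName(T tuple);

    void errorTuple(T tuple);
  }

  static class LoggingDriver implements Driver<String, StringBuilder>
  {
    @Override
    public void processTuple(String tuple, StringBuilder table)
    {
      table.append(tuple).append('\n');
    }

    @Override
    public String getTableName(String tuple)
    {
      return "default"; // a real driver would derive the table from the tuple
    }

    @Override
    public void errorTuple(String tuple)
    {
      System.err.println("failed: " + tuple);
    }
  }
}
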
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/package-info.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/package-info.java
index 0ccd6c2..2df5c10 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/package-info.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/hbase/package-info.java
@@ -20,4 +20,4 @@
  * HBase operators and utilities.
  */
 @org.apache.hadoop.classification.InterfaceStability.Evolving
-package com.datatorrent.contrib.hbase;
+package org.apache.apex.malhar.contrib.hbase;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/jython/PythonOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/jython/PythonOperator.java
index a6fb42f..997c890 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/jython/PythonOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/jython/PythonOperator.java
@@ -16,17 +16,23 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.jython;
-
-import com.datatorrent.api.Context.OperatorContext;
-import com.datatorrent.lib.script.ScriptOperator;
-import org.python.core.*;
-import org.python.util.PythonInterpreter;
+package org.apache.apex.malhar.contrib.jython;
 
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
 
+import org.python.core.PyCode;
+import org.python.core.PyIterator;
+import org.python.core.PyObject;
+import org.python.core.PyStringMap;
+import org.python.core.PyTuple;
+import org.python.util.PythonInterpreter;
+
+import org.apache.apex.malhar.lib.script.ScriptOperator;
+
+import com.datatorrent.api.Context.OperatorContext;
+
 /**
  * An operator that executes a python script and passes the input as bindings.
  * <p></p>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/jython/package-info.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/jython/package-info.java
index 08b3a3e..9fc7de7 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/jython/package-info.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/jython/package-info.java
@@ -17,4 +17,4 @@
  * under the License.
  */
 @org.apache.hadoop.classification.InterfaceStability.Evolving
-package com.datatorrent.contrib.jython;
+package org.apache.apex.malhar.contrib.jython;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/AbstractExactlyOnceKafkaOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/AbstractExactlyOnceKafkaOutputOperator.java
index b11c8ef..ec082d1 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/AbstractExactlyOnceKafkaOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/AbstractExactlyOnceKafkaOutputOperator.java
@@ -16,12 +16,19 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kafka;
+package org.apache.apex.malhar.contrib.kafka;
 
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Sets;
 import com.datatorrent.api.Context.OperatorContext;
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.common.util.Pair;
-import com.google.common.collect.Sets;
 import kafka.api.FetchRequest;
 import kafka.api.FetchRequestBuilder;
 import kafka.javaapi.FetchResponse;
@@ -32,13 +39,6 @@
 import kafka.message.MessageAndOffset;
 import kafka.producer.KeyedMessage;
 import kafka.producer.Partitioner;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.nio.ByteBuffer;
-import java.util.HashMap;
-import java.util.Map;
-
 
 /**
  * This is a base implementation of a Kafka output operator,
@@ -82,7 +82,6 @@
  */
 public abstract class AbstractExactlyOnceKafkaOutputOperator<T, K, V> extends AbstractKafkaOutputOperator<K, V>
 {
-
   private Map<Integer, Pair<byte[], byte[]>>  lastMsgs;
 
   private transient  Partitioner partitioner;
@@ -94,9 +93,9 @@
   {
     super.setup(context);
     try {
-      String className = (String) getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_PARTITIONER);
+      String className = (String)getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_PARTITIONER);
       if (className != null) {
-        partitioner = (Partitioner) Class.forName(className).newInstance();
+        partitioner = (Partitioner)Class.forName(className).newInstance();
       }
     } catch (Exception e) {
       throw new RuntimeException("Failed to initialize partitioner", e);
@@ -108,22 +107,22 @@
   /**
    * This input port receives tuples that will be written out to Kafka.
    */
-  public final transient DefaultInputPort<T> inputPort = new DefaultInputPort<T>() {
+  public final transient DefaultInputPort<T> inputPort = new DefaultInputPort<T>()
+  {
     @Override
     public void process(T tuple)
     {
       Pair<K, V> keyValue = tupleToKeyValue(tuple);
       int pid = 0;
 
-      if(partitioner!=null){
+      if (partitioner != null) {
         pid = partitioner.partition(keyValue.first, partitionNum);
       }
 
       Pair<byte[], byte[]> lastMsg = lastMsgs.get(pid);
-
-      if(lastMsg == null || compareToLastMsg(keyValue, lastMsg) > 0){
+      if (lastMsg == null || compareToLastMsg(keyValue, lastMsg) > 0) {
         getProducer().send(new KeyedMessage<K, V>(getTopic(), keyValue.first, keyValue.second));
-        sendCount ++;
+        sendCount++;
       } else {
         // ignore the tuple because Kafka already has it
         logger.debug("Ignore tuple " + tuple);
@@ -134,7 +133,6 @@
 
   private void initializeLastProcessingOffset()
   {
-
     // read last received kafka message
     TopicMetadata tm = KafkaMetadataUtil.getTopicMetadata(Sets.newHashSet((String)getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_BROKERLIST)), this.getTopic());
 
@@ -170,9 +168,7 @@
         key.get(keyBytes);
         lastMsgs.put(pm.partitionId(), new Pair<byte[], byte[]>(keyBytes, valueBytes));
       }
-
     }
-
   }
 
   /**
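
The rewritten input port above is the heart of the operator's exactly-once behavior: a tuple is sent only when it compares greater than the last message recorded for its Kafka partition, so tuples replayed after a failure that Kafka already holds are silently skipped. The gate in isolation (long keys and their natural ordering are illustrative simplifications of compareToLastMsg()):

import java.util.HashMap;
import java.util.Map;

public class ResendFilterSketch
{
  // last key sent per Kafka partition; the real operator recovers this map
  // from the tail of each partition in initializeLastProcessingOffset()
  private final Map<Integer, Long> lastSent = new HashMap<Integer, Long>();

  /** Returns true if the tuple must be sent, false if Kafka already has it. */
  public boolean shouldSend(int partition, long key)
  {
    Long last = lastSent.get(partition);
    if (last == null || key > last) { // compareToLastMsg(keyValue, lastMsg) > 0 in the diff
      lastSent.put(partition, key);
      return true;
    }
    return false;
  }
}
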
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/AbstractKafkaInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/AbstractKafkaInputOperator.java
index 1218f4a..9130923 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/AbstractKafkaInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/AbstractKafkaInputOperator.java
@@ -16,42 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kafka;
-
-import com.datatorrent.api.Context.OperatorContext;
-import com.datatorrent.api.DefaultPartition;
-import com.datatorrent.api.InputOperator;
-import com.datatorrent.api.Operator;
-import com.datatorrent.api.Operator.ActivationListener;
-import com.datatorrent.api.Partitioner;
-import com.datatorrent.api.Stats;
-import com.datatorrent.api.StatsListener;
-import com.datatorrent.api.annotation.OperatorAnnotation;
-import com.datatorrent.api.annotation.Stateless;
-import com.datatorrent.lib.util.KryoCloneUtils;
-import com.google.common.base.Joiner;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-import kafka.api.FetchRequest;
-import kafka.api.FetchRequestBuilder;
-import kafka.cluster.Broker;
-import kafka.javaapi.FetchResponse;
-import kafka.javaapi.PartitionMetadata;
-import kafka.javaapi.consumer.SimpleConsumer;
-import kafka.message.Message;
-import kafka.message.MessageAndOffset;
-
-import org.apache.apex.malhar.lib.wal.WindowDataManager;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.lang3.tuple.MutablePair;
-import org.apache.commons.lang3.tuple.Pair;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.validation.Valid;
-import javax.validation.constraints.Min;
-import javax.validation.constraints.NotNull;
+package org.apache.apex.malhar.contrib.kafka;
 
 import java.io.IOException;
 import java.lang.reflect.Array;
@@ -65,7 +30,43 @@
 import java.util.Map;
 import java.util.Set;
 
-import static com.datatorrent.contrib.kafka.KafkaConsumer.KafkaMeterStatsUtil.getOffsetsForPartitions;
+import javax.validation.Valid;
+import javax.validation.constraints.Min;
+import javax.validation.constraints.NotNull;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.apex.malhar.lib.util.KryoCloneUtils;
+import org.apache.apex.malhar.lib.wal.WindowDataManager;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.tuple.MutablePair;
+import org.apache.commons.lang3.tuple.Pair;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
+import com.datatorrent.api.Context.OperatorContext;
+import com.datatorrent.api.DefaultPartition;
+import com.datatorrent.api.InputOperator;
+import com.datatorrent.api.Operator;
+import com.datatorrent.api.Operator.ActivationListener;
+import com.datatorrent.api.Partitioner;
+import com.datatorrent.api.Stats;
+import com.datatorrent.api.StatsListener;
+import com.datatorrent.api.annotation.OperatorAnnotation;
+import com.datatorrent.api.annotation.Stateless;
+
+import kafka.api.FetchRequest;
+import kafka.api.FetchRequestBuilder;
+import kafka.cluster.Broker;
+import kafka.javaapi.FetchResponse;
+import kafka.javaapi.PartitionMetadata;
+import kafka.javaapi.consumer.SimpleConsumer;
+import kafka.message.Message;
+import kafka.message.MessageAndOffset;
+
+import static org.apache.apex.malhar.contrib.kafka.KafkaConsumer.KafkaMeterStatsUtil.getOffsetsForPartitions;
 
 /**
  * This is a base implementation of a Kafka input operator, which consumes data from Kafka message bus.&nbsp;
@@ -226,7 +227,8 @@
    * processing of messages doesn't get stuck.
    * @return The maximum for the total size
    */
-  public long getMaxTotalMsgSizePerWindow() {
+  public long getMaxTotalMsgSizePerWindow()
+  {
     return maxTotalMsgSizePerWindow;
   }
 
@@ -236,7 +238,8 @@
    *
    * @param maxTotalMsgSizePerWindow The maximum for the total size
    */
-  public void setMaxTotalMsgSizePerWindow(long maxTotalMsgSizePerWindow) {
+  public void setMaxTotalMsgSizePerWindow(long maxTotalMsgSizePerWindow)
+  {
     this.maxTotalMsgSizePerWindow = maxTotalMsgSizePerWindow;
   }
 
@@ -256,7 +259,7 @@
     }
     this.context = context;
     operatorId = context.getId();
-    if(consumer instanceof HighlevelKafkaConsumer && !(windowDataManager instanceof WindowDataManager.NoopWindowDataManager)) {
+    if (consumer instanceof HighlevelKafkaConsumer && !(windowDataManager instanceof WindowDataManager.NoopWindowDataManager)) {
       throw new RuntimeException("Idempotency is not supported for High Level Kafka Consumer");
     }
     windowDataManager.setup(context);
@@ -289,7 +292,7 @@
       if (recoveredData != null) {
         Map<String, List<PartitionMetadata>> pms = KafkaMetadataUtil.getPartitionsForTopic(getConsumer().brokers, getConsumer().topic);
         if (pms != null) {
-          SimpleKafkaConsumer cons = (SimpleKafkaConsumer) getConsumer();
+          SimpleKafkaConsumer cons = (SimpleKafkaConsumer)getConsumer();
           // add all partition requests together in one Fetch request
           FetchRequestBuilder frb = new FetchRequestBuilder().clientId(cons.getClientId());
           for (Map.Entry<KafkaPartition, MutablePair<Long, Integer>> rc : recoveredData.entrySet()) {
@@ -299,13 +302,14 @@
             Iterator<PartitionMetadata> pmIterator = pmsVal.iterator();
             PartitionMetadata pm = pmIterator.next();
             while (pm.partitionId() != kp.getPartitionId()) {
-              if (!pmIterator.hasNext())
+              if (!pmIterator.hasNext()) {
                 break;
+              }
               pm = pmIterator.next();
             }
-            if (pm.partitionId() != kp.getPartitionId())
+            if (pm.partitionId() != kp.getPartitionId()) {
               continue;
-
+            }
             Broker bk = pm.leader();
 
             frb.addFetch(consumer.topic, rc.getKey().getPartitionId(), rc.getValue().left, cons.getBufferSize());
@@ -319,13 +323,14 @@
               emitTuple(kafkaMessage);
               offsetStats.put(kp, msg.offset());
               count = count + 1;
-              if (count.equals(rc.getValue().right))
+              if (count.equals(rc.getValue().right)) {
                 break;
+              }
             }
           }
         }
       }
-      if(windowId == windowDataManager.getLargestCompletedWindow()) {
+      if (windowId == windowDataManager.getLargestCompletedWindow()) {
         // Start the consumer at the largest recovery window
         SimpleKafkaConsumer cons = (SimpleKafkaConsumer)getConsumer();
         // Set the offset positions to the consumer
@@ -337,8 +342,7 @@
         cons.resetOffset(currentOffsets);
         cons.start();
       }
-    }
-    catch (IOException e) {
+    } catch (IOException e) {
       throw new RuntimeException("replay", e);
     }
   }
@@ -354,8 +358,7 @@
     if (currentWindowId > windowDataManager.getLargestCompletedWindow()) {
       try {
         windowDataManager.save(currentWindowRecoveryState, currentWindowId);
-      }
-      catch (IOException e) {
+      } catch (IOException e) {
         throw new RuntimeException("saving recovery", e);
       }
     }
@@ -397,8 +400,7 @@
 
     try {
       windowDataManager.committed(windowId);
-    }
-    catch (IOException e) {
+    } catch (IOException e) {
       throw new RuntimeException("deleting state", e);
     }
   }
@@ -453,10 +455,10 @@
       emitTotalMsgSize += message.msg.size();
       offsetStats.put(message.kafkaPart, message.offSet);
       MutablePair<Long, Integer> offsetAndCount = currentWindowRecoveryState.get(message.kafkaPart);
-      if(offsetAndCount == null) {
+      if (offsetAndCount == null) {
         currentWindowRecoveryState.put(message.kafkaPart, new MutablePair<Long, Integer>(message.offSet, 1));
       } else {
-        offsetAndCount.setRight(offsetAndCount.right+1);
+        offsetAndCount.setRight(offsetAndCount.right + 1);
       }
     }
   }
@@ -507,7 +509,7 @@
 
     boolean isInitialParitition = true;
     // check if it's the initial partition
-    if(partitions.iterator().hasNext()) {
+    if (partitions.iterator().hasNext()) {
       isInitialParitition = partitions.iterator().next().getStats() == null;
     }
 
@@ -516,7 +518,7 @@
 
     // initialize the offset
     Map<KafkaPartition, Long> initOffset = null;
-    if(isInitialParitition && offsetManager !=null){
+    if (isInitialParitition && offsetManager != null) {
       initOffset = offsetManager.loadInitialOffsets();
       logger.info("Initial offsets: {} ", "{ " + Joiner.on(", ").useForNull("").withKeyValueSeparator(": ").join(initOffset) + " }");
     }
@@ -527,95 +529,92 @@
 
     switch (strategy) {
 
-    // For the 1 to 1 mapping The framework will create number of operator partitions based on kafka topic partitions
-    // Each operator partition will consume from only one kafka partition
-    case ONE_TO_ONE:
+      // For the 1-to-1 mapping, the framework creates one operator partition per Kafka topic partition
+      // Each operator partition will consume from only one Kafka partition
+      case ONE_TO_ONE:
 
-      if (isInitialParitition) {
-        lastRepartitionTime = System.currentTimeMillis();
-        logger.info("[ONE_TO_ONE]: Initializing partition(s)");
-        // get partition metadata for topics.
-        // Whatever operator is using high-level or simple kafka consumer, the operator always create a temporary simple kafka consumer to get the metadata of the topic
-        // The initial value of brokerList of the KafkaConsumer is used to retrieve the topic metadata
-        Map<String, List<PartitionMetadata>> kafkaPartitions = KafkaMetadataUtil.getPartitionsForTopic(getConsumer().brokers, getConsumer().getTopic());
+        if (isInitialParitition) {
+          lastRepartitionTime = System.currentTimeMillis();
+          logger.info("[ONE_TO_ONE]: Initializing partition(s)");
+          // get partition metadata for topics.
+          // Whether the operator uses the high-level or the simple Kafka consumer, it always creates a temporary simple Kafka consumer to fetch the topic metadata
+          // The initial value of brokerList of the KafkaConsumer is used to retrieve the topic metadata
+          Map<String, List<PartitionMetadata>> kafkaPartitions = KafkaMetadataUtil.getPartitionsForTopic(getConsumer().brokers, getConsumer().getTopic());
 
-        // initialize the number of operator partitions according to number of kafka partitions
+          // initialize the number of operator partitions according to number of kafka partitions
 
-        newPartitions = new LinkedList<Partitioner.Partition<AbstractKafkaInputOperator<K>>>();
-        for (Map.Entry<String, List<PartitionMetadata>> kp : kafkaPartitions.entrySet()) {
-          String clusterId = kp.getKey();
-          for (PartitionMetadata pm : kp.getValue()) {
-            logger.info("[ONE_TO_ONE]: Create operator partition for cluster {}, topic {}, kafka partition {} ", clusterId, getConsumer().topic, pm.partitionId());
-            newPartitions.add(createPartition(Sets.newHashSet(new KafkaPartition(clusterId, consumer.topic, pm.partitionId())), initOffset));
-          }
-        }
-        resultPartitions = newPartitions;
-        numPartitionsChanged = true;
-      }
-      else if (newWaitingPartition.size() != 0) {
-        // add partition for new kafka partition
-        for (KafkaPartition newPartition : newWaitingPartition) {
-          logger.info("[ONE_TO_ONE]: Add operator partition for cluster {}, topic {}, partition {}", newPartition.getClusterId(), getConsumer().topic, newPartition.getPartitionId());
-          partitions.add(createPartition(Sets.newHashSet(newPartition), null));
-        }
-        newWaitingPartition.clear();
-        resultPartitions = partitions;
-        numPartitionsChanged = true;
-      }
-      break;
-    // For the 1 to N mapping The initial partition number is defined by stream application
-    // Afterwards, the framework will dynamically adjust the partition and allocate consumers to as less operator partitions as it can
-    //  and guarantee the total intake rate for each operator partition is below some threshold
-    case ONE_TO_MANY:
-
-      if (getConsumer() instanceof HighlevelKafkaConsumer) {
-        throw new UnsupportedOperationException("[ONE_TO_MANY]: The high-level consumer is not supported for ONE_TO_MANY partition strategy.");
-      }
-
-      if (isInitialParitition || newWaitingPartition.size() != 0) {
-        lastRepartitionTime = System.currentTimeMillis();
-        logger.info("[ONE_TO_MANY]: Initializing partition(s)");
-        // get partition metadata for topics.
-        // Whatever operator is using high-level or simple kafka consumer, the operator always create a temporary simple kafka consumer to get the metadata of the topic
-        // The initial value of brokerList of the KafkaConsumer is used to retrieve the topic metadata
-        Map<String, List<PartitionMetadata>> kafkaPartitions = KafkaMetadataUtil.getPartitionsForTopic(getConsumer().brokers, getConsumer().getTopic());
-
-        int size = initialPartitionCount;
-        @SuppressWarnings("unchecked")
-        Set<KafkaPartition>[] kps = (Set<KafkaPartition>[]) Array.newInstance((new HashSet<KafkaPartition>()).getClass(), size);
-        int i = 0;
-        for (Map.Entry<String, List<PartitionMetadata>> en : kafkaPartitions.entrySet()) {
-          String clusterId = en.getKey();
-          for (PartitionMetadata pm : en.getValue()) {
-            if (kps[i % size] == null) {
-              kps[i % size] = new HashSet<KafkaPartition>();
+          newPartitions = new LinkedList<Partitioner.Partition<AbstractKafkaInputOperator<K>>>();
+          for (Map.Entry<String, List<PartitionMetadata>> kp : kafkaPartitions.entrySet()) {
+            String clusterId = kp.getKey();
+            for (PartitionMetadata pm : kp.getValue()) {
+              logger.info("[ONE_TO_ONE]: Create operator partition for cluster {}, topic {}, kafka partition {} ", clusterId, getConsumer().topic, pm.partitionId());
+              newPartitions.add(createPartition(Sets.newHashSet(new KafkaPartition(clusterId, consumer.topic, pm.partitionId())), initOffset));
             }
-            kps[i % size].add(new KafkaPartition(clusterId, consumer.topic, pm.partitionId()));
-            i++;
           }
+          resultPartitions = newPartitions;
+          numPartitionsChanged = true;
+        } else if (newWaitingPartition.size() != 0) {
+          // add partition for new kafka partition
+          for (KafkaPartition newPartition : newWaitingPartition) {
+            logger.info("[ONE_TO_ONE]: Add operator partition for cluster {}, topic {}, partition {}", newPartition.getClusterId(), getConsumer().topic, newPartition.getPartitionId());
+            partitions.add(createPartition(Sets.newHashSet(newPartition), null));
+          }
+          newWaitingPartition.clear();
+          resultPartitions = partitions;
+          numPartitionsChanged = true;
         }
-        size = i > size ? size : i;
-        newPartitions = new ArrayList<Partitioner.Partition<AbstractKafkaInputOperator<K>>>(size);
-        for (i = 0; i < size; i++) {
-          logger.info("[ONE_TO_MANY]: Create operator partition for kafka partition(s): {} ", StringUtils.join(kps[i], ", "));
-          newPartitions.add(createPartition(kps[i], initOffset));
-        }
-        // Add the existing partition Ids to the deleted operators
-        for (Partition<AbstractKafkaInputOperator<K>> op : partitions)
-        {
-          deletedOperators.add(op.getPartitionedInstance().operatorId);
+        break;
+      // For the 1-to-N mapping, the initial partition count is defined by the streaming application
+      // Afterwards, the framework dynamically adjusts the partitions and allocates consumers to as few operator partitions as it can
+      //  while keeping the total intake rate of each operator partition below a threshold
+      case ONE_TO_MANY:
+
+        if (getConsumer() instanceof HighlevelKafkaConsumer) {
+          throw new UnsupportedOperationException("[ONE_TO_MANY]: The high-level consumer is not supported for ONE_TO_MANY partition strategy.");
         }
 
-        newWaitingPartition.clear();
-        resultPartitions = newPartitions;
-        numPartitionsChanged = true;
-      }
-      break;
+        if (isInitialParitition || newWaitingPartition.size() != 0) {
+          lastRepartitionTime = System.currentTimeMillis();
+          logger.info("[ONE_TO_MANY]: Initializing partition(s)");
+          // get partition metadata for topics.
+          // Whether the operator uses the high-level or the simple Kafka consumer, it always creates a temporary simple Kafka consumer to fetch the topic metadata
+          // The initial value of brokerList of the KafkaConsumer is used to retrieve the topic metadata
+          Map<String, List<PartitionMetadata>> kafkaPartitions = KafkaMetadataUtil.getPartitionsForTopic(getConsumer().brokers, getConsumer().getTopic());
 
-    case ONE_TO_MANY_HEURISTIC:
-      throw new UnsupportedOperationException("[ONE_TO_MANY_HEURISTIC]: Not implemented yet");
-    default:
-      break;
+          int size = initialPartitionCount;
+          @SuppressWarnings("unchecked")
+          Set<KafkaPartition>[] kps = (Set<KafkaPartition>[])Array.newInstance((new HashSet<KafkaPartition>()).getClass(), size);
+          int i = 0;
+          for (Map.Entry<String, List<PartitionMetadata>> en : kafkaPartitions.entrySet()) {
+            String clusterId = en.getKey();
+            for (PartitionMetadata pm : en.getValue()) {
+              if (kps[i % size] == null) {
+                kps[i % size] = new HashSet<KafkaPartition>();
+              }
+              kps[i % size].add(new KafkaPartition(clusterId, consumer.topic, pm.partitionId()));
+              i++;
+            }
+          }
+          size = i > size ? size : i;
+          newPartitions = new ArrayList<Partitioner.Partition<AbstractKafkaInputOperator<K>>>(size);
+          for (i = 0; i < size; i++) {
+            logger.info("[ONE_TO_MANY]: Create operator partition for kafka partition(s): {} ", StringUtils.join(kps[i], ", "));
+            newPartitions.add(createPartition(kps[i], initOffset));
+          }
+          // Add the existing partition Ids to the deleted operators
+          for (Partition<AbstractKafkaInputOperator<K>> op : partitions) {
+            deletedOperators.add(op.getPartitionedInstance().operatorId);
+          }
+          newWaitingPartition.clear();
+          resultPartitions = newPartitions;
+          numPartitionsChanged = true;
+        }
+        break;
+
+      case ONE_TO_MANY_HEURISTIC:
+        throw new UnsupportedOperationException("[ONE_TO_MANY_HEURISTIC]: Not implemented yet");
+      default:
+        break;
     }
 
     if (numPartitionsChanged) {
@@ -689,7 +688,7 @@
     List<KafkaConsumer.KafkaMeterStats> kmsList = new LinkedList<KafkaConsumer.KafkaMeterStats>();
     for (Stats.OperatorStats os : stats.getLastWindowedStats()) {
       if (os != null && os.counters instanceof KafkaConsumer.KafkaMeterStats) {
-        kmsList.add((KafkaConsumer.KafkaMeterStats) os.counters);
+        kmsList.add((KafkaConsumer.KafkaMeterStats)os.counters);
       }
     }
     return kmsList;
@@ -719,12 +718,12 @@
       return false;
     }
 
-    if(repartitionInterval < 0){
+    if (repartitionInterval < 0) {
       // if repartition is disabled
       return false;
     }
 
-    if(t - lastRepartitionTime < repartitionInterval) {
+    if (t - lastRepartitionTime < repartitionInterval) {
       // return false if it's still within repartitionInterval since last (re)partition
       return false;
     }
@@ -747,18 +746,18 @@
         }
 
         Map<String, List<PartitionMetadata>> partitionsMeta = KafkaMetadataUtil.getPartitionsForTopic(consumer.brokers, consumer.getTopic());
-        if(partitionsMeta == null){
+        if (partitionsMeta == null) {
           // broker(s) temporarily failed to return metadata
           return false;
         }
         for (Map.Entry<String, List<PartitionMetadata>> en : partitionsMeta.entrySet()) {
-          if(en.getValue() == null){
+          if (en.getValue() == null) {
             // broker(s) temporarily failed to return metadata
             continue;
           }
           for (PartitionMetadata pm : en.getValue()) {
             KafkaPartition pa = new KafkaPartition(en.getKey(), consumer.topic, pm.partitionId());
-            if(!existingIds.contains(pa)){
+            if (!existingIds.contains(pa)) {
               newWaitingPartition.add(pa);
             }
           }
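
In the re-indented ONE_TO_MANY branch above, all discovered Kafka partitions are spread across at most initialPartitionCount operator partitions with an i % size round robin, and size is then clamped to i so empty slots are dropped. That grouping step on its own, with String standing in for KafkaPartition:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class RoundRobinGroupingSketch
{
  public static List<Set<String>> group(List<String> kafkaPartitions, int size)
  {
    List<Set<String>> kps = new ArrayList<Set<String>>();
    for (int s = 0; s < size; s++) {
      kps.add(new HashSet<String>());
    }
    int i = 0;
    for (String kp : kafkaPartitions) {
      kps.get(i % size).add(kp); // kps[i % size].add(...) in the diff
      i++;
    }
    // size = i > size ? size : i; in the diff: never keep more slots than partitions
    return kps.subList(0, Math.min(i, size));
  }

  public static void main(String[] args)
  {
    List<String> parts = new ArrayList<String>();
    for (int p = 0; p < 5; p++) {
      parts.add("topic-" + p);
    }
    System.out.println(group(parts, 3)); // 5 partitions spread over 3 slots
  }
}
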
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/AbstractKafkaOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/AbstractKafkaOutputOperator.java
index f0835c4..624c955 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/AbstractKafkaOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/AbstractKafkaOutputOperator.java
@@ -16,18 +16,19 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kafka;
+package org.apache.apex.malhar.contrib.kafka;
+
+import java.util.Properties;
+import javax.validation.constraints.NotNull;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.commons.lang3.StringUtils;
 
 import com.datatorrent.api.Context.OperatorContext;
 import com.datatorrent.api.Operator;
 import kafka.javaapi.producer.Producer;
 import kafka.producer.ProducerConfig;
-import org.apache.commons.lang3.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.validation.constraints.NotNull;
-import java.util.Properties;
 
 /**
  * This is the base implementation of a Kafka output operator, which writes data to the Kafka message bus.
@@ -86,7 +87,8 @@
    * setup producer configuration.
    * @return ProducerConfig
    */
-  protected ProducerConfig createKafkaProducerConfig(){
+  protected ProducerConfig createKafkaProducerConfig()
+  {
     Properties prop = new Properties();
     for (String propString : producerProperties.split(",")) {
       if (!propString.contains("=")) {
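
createKafkaProducerConfig() above starts by exploding the comma-separated producerProperties string into java.util.Properties before wrapping it in a ProducerConfig. The hunk is cut off before the malformed-entry branch, so how the real method reacts there is not shown; this sketch simply rejects such entries (an assumption):

import java.util.Properties;

public class ProducerPropsParserSketch
{
  public static Properties parse(String producerProperties)
  {
    Properties prop = new Properties();
    for (String propString : producerProperties.split(",")) {
      if (!propString.contains("=")) {
        throw new IllegalArgumentException("Expected key=value, got: " + propString);
      }
      String[] keyVal = propString.trim().split("=", 2);
      prop.put(keyVal[0].trim(), keyVal[1].trim());
    }
    return prop;
  }

  public static void main(String[] args)
  {
    System.out.println(parse("metadata.broker.list=localhost:9092,producer.type=async"));
  }
}
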
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/AbstractKafkaSinglePortInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/AbstractKafkaSinglePortInputOperator.java
index 96dd599..f656807 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/AbstractKafkaSinglePortInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/AbstractKafkaSinglePortInputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kafka;
+package org.apache.apex.malhar.contrib.kafka;
 
 import com.datatorrent.api.DefaultOutputPort;
 
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/HighlevelKafkaConsumer.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/HighlevelKafkaConsumer.java
index 85cee56..5531285 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/HighlevelKafkaConsumer.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/HighlevelKafkaConsumer.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kafka;
+package org.apache.apex.malhar.contrib.kafka;
 
 import java.util.HashMap;
 import java.util.List;
@@ -30,8 +30,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Joiner;
-
 import kafka.consumer.ConsumerConfig;
 import kafka.consumer.ConsumerIterator;
 import kafka.consumer.KafkaStream;
@@ -158,8 +156,8 @@
       Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = standardConsumer.get(e.getKey()).createMessageStreams(topicCountMap);
 
       for (final KafkaStream<byte[], byte[]> stream : consumerMap.get(topic)) {
-        consumerThreadExecutor.submit(new Runnable() {
-
+        consumerThreadExecutor.submit(new Runnable()
+        {
           KafkaPartition kp = new KafkaPartition(e.getKey(), topic, -1);
 
           public void run()
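
The reformatted anonymous Runnable above is created once per KafkaStream and submitted to consumerThreadExecutor, giving each stream its own consuming thread that feeds the shared holding buffer. The thread-per-stream shape reduced to plain JDK types (lists of strings stand in for the Kafka streams, and the queue for holdingBuffer):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ThreadPerStreamSketch
{
  public static void main(String[] args)
  {
    List<List<String>> streams = Arrays.asList(Arrays.asList("a", "b"), Arrays.asList("c"));
    final BlockingQueue<String> holdingBuffer = new ArrayBlockingQueue<String>(1024);
    ExecutorService consumerThreadExecutor = Executors.newCachedThreadPool();
    for (final List<String> stream : streams) {
      consumerThreadExecutor.submit(new Runnable()
      {
        @Override
        public void run()
        {
          for (String msg : stream) {
            holdingBuffer.offer(msg); // putMessage(...) in the consumer
          }
        }
      });
    }
    consumerThreadExecutor.shutdown();
  }
}
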
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaConsumer.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaConsumer.java
index a67ff48..f59f2c8 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaConsumer.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaConsumer.java
@@ -16,21 +16,8 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kafka;
+package org.apache.apex.malhar.contrib.kafka;
 
-import com.datatorrent.api.Context;
-import com.esotericsoftware.kryo.serializers.FieldSerializer.Bind;
-import com.esotericsoftware.kryo.serializers.JavaSerializer;
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.Maps;
-import com.google.common.collect.SetMultimap;
-import kafka.message.Message;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.StringUtils;
-
-import javax.validation.constraints.NotNull;
-import javax.validation.constraints.Pattern;
-import javax.validation.constraints.Pattern.Flag;
 import java.io.Closeable;
 import java.io.Serializable;
 import java.util.Collection;
@@ -44,6 +31,19 @@
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
+import javax.validation.constraints.NotNull;
+import javax.validation.constraints.Pattern;
+import javax.validation.constraints.Pattern.Flag;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang3.StringUtils;
+import com.esotericsoftware.kryo.serializers.FieldSerializer.Bind;
+import com.esotericsoftware.kryo.serializers.JavaSerializer;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Maps;
+import com.google.common.collect.SetMultimap;
+import com.datatorrent.api.Context;
+import kafka.message.Message;
 
 /**
  * Base Kafka Consumer class used by kafka input operator
@@ -52,9 +52,9 @@
  */
 public abstract class KafkaConsumer implements Closeable
 {
-  protected final static String HIGHLEVEL_CONSUMER_ID_SUFFIX = "_stream_";
+  protected static final String HIGHLEVEL_CONSUMER_ID_SUFFIX = "_stream_";
 
-  protected final static String SIMPLE_CONSUMER_ID_SUFFIX = "_partition_";
+  protected static final String SIMPLE_CONSUMER_ID_SUFFIX = "_partition_";
   private String zookeeper;
 
   public KafkaConsumer()
@@ -111,7 +111,7 @@
    * This setting is case-insensitive
    * By default it always consumes from the beginning of the queue
    */
-  @Pattern(flags={Flag.CASE_INSENSITIVE}, regexp = "earliest|latest")
+  @Pattern(flags = {Flag.CASE_INSENSITIVE}, regexp = "earliest|latest")
   protected String initialOffset = "latest";
 
 
@@ -122,17 +122,18 @@
   /**
    * This method is called in the setup method of the operator
    */
-  public void create(){
+  public void create()
+  {
     initBrokers();
     holdingBuffer = new ArrayBlockingQueue<KafkaMessage>(cacheSize);
   }
 
   public void initBrokers()
   {
-    if(brokers!=null){
-      return ;
+    if (brokers != null) {
+      return;
     }
-    if(zookeeperMap !=null){
+    if (zookeeperMap != null) {
       brokers = HashMultimap.create();
       for (String clusterId: zookeeperMap.keySet()) {
         try {
@@ -158,12 +159,13 @@
   /**
    * The method is called in the deactivate method of the operator
    */
-  public void stop() {
+  public void stop()
+  {
     isAlive = false;
     statsSnapShot.stop();
     holdingBuffer.clear();
     IOUtils.closeQuietly(this);
-  };
+  }
 
   /**
    * This method is called in the teardown method of the operator
@@ -227,7 +229,8 @@
   }
 
 
-  final protected void putMessage(KafkaPartition partition, Message msg, long offset) throws InterruptedException{
+  protected final void putMessage(KafkaPartition partition, Message msg, long offset) throws InterruptedException
+  {
     // block from receiving more message
     holdingBuffer.put(new KafkaMessage(partition, msg, offset));
     statsSnapShot.mark(partition, msg.payloadSize());
@@ -300,7 +303,8 @@
       totalBytesPerSec = _1minAvg[1];
     }
 
-    public void updateOffsets(Map<KafkaPartition, Long> offsets){
+    public void updateOffsets(Map<KafkaPartition, Long> offsets)
+    {
       for (Entry<KafkaPartition, Long> os : offsets.entrySet()) {
         PartitionStats ps = putPartitionStatsIfNotPresent(os.getKey());
         ps.offset = os.getValue();
@@ -325,7 +329,8 @@
       ps.brokerId = brokerId;
     }
 
-    private synchronized PartitionStats putPartitionStatsIfNotPresent(KafkaPartition kp){
+    private synchronized PartitionStats putPartitionStatsIfNotPresent(KafkaPartition kp)
+    {
       PartitionStats ps = partitionStats.get(kp);
 
       if (ps == null) {
@@ -347,6 +352,7 @@
       this.msg = msg;
       this.offSet = offset;
     }
+
     public KafkaPartition getKafkaPart()
     {
       return kafkaPart;
@@ -363,8 +369,8 @@
     }
   }
 
-  public static class KafkaMeterStatsUtil {
-
+  public static class KafkaMeterStatsUtil
+  {
     public static Map<KafkaPartition, Long> getOffsetsForPartitions(List<KafkaMeterStats> kafkaMeterStats)
     {
       Map<KafkaPartition, Long> result = Maps.newHashMap();
@@ -387,11 +393,8 @@
 
   }
 
-  public static class KafkaMeterStatsAggregator implements Context.CountersAggregator, Serializable{
-
-    /**
-     *
-     */
+  public static class KafkaMeterStatsAggregator implements Context.CountersAggregator, Serializable
+  {
     private static final long serialVersionUID = 729987800215151678L;
 
     @Override
@@ -399,7 +402,7 @@
     {
       KafkaMeterStats kms = new KafkaMeterStats();
       for (Object o : countersList) {
-        if (o instanceof KafkaMeterStats){
+        if (o instanceof KafkaMeterStats) {
           KafkaMeterStats subKMS = (KafkaMeterStats)o;
           kms.partitionStats.putAll(subKMS.partitionStats);
           kms.totalBytesPerSec += subKMS.totalBytesPerSec;
@@ -411,12 +414,8 @@
 
   }
 
-  public static class PartitionStats implements Serializable {
-
-
-    /**
-     *
-     */
+  public static class PartitionStats implements Serializable
+  {
     private static final long serialVersionUID = -6572690643487689766L;
 
     public int brokerId = -1;
@@ -431,13 +430,11 @@
 
   }
 
-
-
   /**
    * A snapshot of the consuming rate within 1 min
    */
-  static class SnapShot {
-
+  static class SnapShot
+  {
     // msgs/s and bytes/s for each partition
 
     /**
@@ -485,35 +482,41 @@
     }
 
 
-    public void start(){
-      if(service==null){
+    public void start()
+    {
+      if (service == null) {
         service = Executors.newScheduledThreadPool(1);
       }
-      service.scheduleAtFixedRate(new Runnable() {
+      service.scheduleAtFixedRate(new Runnable()
+      {
         @Override
         public void run()
         {
           moveNext();
-          if(last<60)last++;
+          if (last < 60) {
+            last++;
+          }
         }
       }, 1, 1, TimeUnit.SECONDS);
 
     }
 
-    public void stop(){
-      if(service!=null){
+    public void stop()
+    {
+      if (service != null) {
         service.shutdown();
       }
     }
 
-    public synchronized void mark(KafkaPartition partition, long bytes){
+    public synchronized void mark(KafkaPartition partition, long bytes)
+    {
       msgSec[cursor]++;
       msgSec[60]++;
       bytesSec[cursor] += bytes;
       bytesSec[60] += bytes;
       long[] msgv = _1_min_msg_sum_par.get(partition);
       long[] bytev = _1_min_byte_sum_par.get(partition);
-      if(msgv == null){
+      if (msgv == null) {
         msgv = new long[61];
         bytev = new long[61];
         _1_min_msg_sum_par.put(partition, msgv);
@@ -525,12 +528,13 @@
       bytev[60] += bytes;
     }
 
-    public synchronized void setupStats(KafkaMeterStats stat){
-      long[] _1minAvg = {msgSec[60]/last, bytesSec[60]/last};
+    public synchronized void setupStats(KafkaMeterStats stat)
+    {
+      long[] _1minAvg = {msgSec[60] / last, bytesSec[60] / last};
       for (Entry<KafkaPartition, long[]> item : _1_min_msg_sum_par.entrySet()) {
-        long[] msgv =item.getValue();
+        long[] msgv = item.getValue();
         long[] bytev = _1_min_byte_sum_par.get(item.getKey());
-        long[] _1minAvgPar = {msgv[60]/last, bytev[60]/last};
+        long[] _1minAvgPar = {msgv[60] / last, bytev[60] / last};
         stat.set_1minMovingAvgPerPartition(item.getKey(), _1minAvgPar);
       }
       stat.set_1minMovingAvg(_1minAvg);
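
The SnapShot class above tracks throughput with 61-slot arrays: indexes 0-59 hold per-second counts for the last minute, index 60 a rolling sum, a scheduled task advances the cursor every second, and setupStats() divides the sum by the number of seconds observed so far (last). moveNext() itself is outside this hunk, so its body below is a guess at the usual ring-buffer bookkeeping:

public class OneMinuteRateSketch
{
  private final long[] perSec = new long[61]; // [0..59] per-second slots, [60] rolling sum
  private int cursor = 0;
  private int last = 0; // seconds observed, capped at 60

  public synchronized void mark(long bytes)
  {
    perSec[cursor] += bytes;
    perSec[60] += bytes;
  }

  /** Call once per second, e.g. from a ScheduledExecutorService as in the diff. */
  public synchronized void moveNext()
  {
    cursor = (cursor + 1) % 60;
    perSec[60] -= perSec[cursor]; // retire the slot we are about to reuse
    perSec[cursor] = 0;
    if (last < 60) {
      last++;
    }
  }

  public synchronized long oneMinuteAvg()
  {
    return last == 0 ? 0 : perSec[60] / last; // msgSec[60] / last in setupStats()
  }
}
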
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaJsonEncoder.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaJsonEncoder.java
index 65373bc..eaea250 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaJsonEncoder.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaJsonEncoder.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kafka;
+package org.apache.apex.malhar.contrib.kafka;
 
 import org.codehaus.jackson.map.ObjectMapper;
 import org.slf4j.Logger;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaMetadataUtil.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaMetadataUtil.java
index b9d4b1b..9f761cd 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaMetadataUtil.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaMetadataUtil.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kafka;
+package org.apache.apex.malhar.contrib.kafka;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -28,12 +28,9 @@
 import java.util.Set;
 
 import org.I0Itec.zkclient.ZkClient;
-import org.apache.commons.lang3.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import scala.collection.JavaConversions;
-
 import com.google.common.collect.Maps;
 import com.google.common.collect.Maps.EntryTransformer;
 import com.google.common.collect.SetMultimap;
@@ -50,6 +47,8 @@
 import kafka.utils.ZKStringSerializer$;
 import kafka.utils.ZkUtils;
 
+import scala.collection.JavaConversions;
+
 /**
  * A util class used to retrieve all the metadata for partitions/topics
  * Every method in the class creates a temporary simple kafka consumer and
@@ -69,7 +68,7 @@
   // A temporary client used to retrieve the metadata of topic/partition etc
   private static final String mdClientId = "Kafka_Metadata_Lookup_Client";
 
-  private static final int timeout=10000;
+  private static final int timeout = 10000;
 
   // buffer size for the MD lookup client is 128k, which should be enough for most cases
   private static final int bufferSize = 128 * 1024;
@@ -97,12 +96,14 @@
    */
   public static Map<String, List<PartitionMetadata>> getPartitionsForTopic(SetMultimap<String, String> brokers, final String topic)
   {
-    return Maps.transformEntries(brokers.asMap(), new EntryTransformer<String, Collection<String>, List<PartitionMetadata>>(){
+    return Maps.transformEntries(brokers.asMap(), new EntryTransformer<String, Collection<String>, List<PartitionMetadata>>()
+    {
       @Override
       public List<PartitionMetadata> transformEntry(String key, Collection<String> bs)
       {
         return getPartitionsForTopic(new HashSet<String>(bs), topic);
-      }});
+      }
+    });
   }
 
   /**
@@ -110,8 +111,8 @@
    * @param zkHost
    * @return
    */
-  public static Set<String> getBrokers(Set<String> zkHost){
-
+  public static Set<String> getBrokers(Set<String> zkHost)
+  {
     ZkClient zkclient = new ZkClient(zkHost.iterator().next(), 30000, 30000, ZKStringSerializer$.MODULE$);
     Set<String> brokerHosts = new HashSet<String>();
     for (Broker b : JavaConversions.asJavaIterable(ZkUtils.getAllBrokersInCluster(zkclient))) {
@@ -121,7 +122,6 @@
     return brokerHosts;
   }
 
-
   /**
    * @param brokerList
    * @param topic
@@ -188,7 +188,6 @@
     }
   }
 
-
   /**
    * @param consumer
    * @param topic
@@ -216,5 +215,4 @@
     return offsets[0];
   }
 
-
 }
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaPartition.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaPartition.java
index 9954eb3..050f0ca 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaPartition.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaPartition.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kafka;
+package org.apache.apex.malhar.contrib.kafka;
 
 import java.io.Serializable;
 
@@ -25,7 +25,7 @@
  */
 public class KafkaPartition implements Serializable
 {
-  protected static final String DEFAULT_CLUSTERID = "com.datatorrent.contrib.kafka.defaultcluster";
+  protected static final String DEFAULT_CLUSTERID = "org.apache.apex.malhar.contrib.kafka.defaultcluster";
 
   @SuppressWarnings("unused")
   private KafkaPartition()
@@ -101,25 +101,33 @@
   @Override
   public boolean equals(Object obj)
   {
-    if (this == obj)
+    if (this == obj) {
       return true;
-    if (obj == null)
+    }
+    if (obj == null) {
       return false;
-    if (getClass() != obj.getClass())
+    }
+    if (getClass() != obj.getClass()) {
       return false;
-    KafkaPartition other = (KafkaPartition) obj;
+    }
+    KafkaPartition other = (KafkaPartition)obj;
     if (clusterId == null) {
-      if (other.clusterId != null)
+      if (other.clusterId != null) {
         return false;
-    } else if (!clusterId.equals(other.clusterId))
+      }
+    } else if (!clusterId.equals(other.clusterId)) {
       return false;
-    if (partitionId != other.partitionId)
+    }
+    if (partitionId != other.partitionId) {
       return false;
+    }
     if (topic == null) {
-      if (other.topic != null)
+      if (other.topic != null) {
         return false;
-    } else if (!topic.equals(other.topic))
+      }
+    } else if (!topic.equals(other.topic)) {
       return false;
+    }
     return true;
   }
 
@@ -129,6 +137,4 @@
     return "KafkaPartition [clusterId=" + clusterId + ", partitionId=" + partitionId + ", topic=" + topic + "]";
   }
 
-
-
 }
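
The equals() above keeps its pre-Java-7 null-guard chains, now merely wrapped in braces for checkstyle. For comparison only (this is not what the PR does), java.util.Objects expresses the same three-field comparison much more compactly; a behavior-equivalent sketch on a stand-in class with the same fields:

import java.util.Objects;

public class KafkaPartitionLike
{
  String clusterId;
  String topic;
  int partitionId;

  @Override
  public boolean equals(Object obj)
  {
    if (this == obj) {
      return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
      return false;
    }
    KafkaPartitionLike other = (KafkaPartitionLike)obj;
    return partitionId == other.partitionId
        && Objects.equals(clusterId, other.clusterId)
        && Objects.equals(topic, other.topic);
  }

  @Override
  public int hashCode()
  {
    return Objects.hash(clusterId, topic, partitionId);
  }
}
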
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaSinglePortByteArrayInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaSinglePortByteArrayInputOperator.java
index 3bb249b..b1e5b83 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaSinglePortByteArrayInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaSinglePortByteArrayInputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kafka;
+package org.apache.apex.malhar.contrib.kafka;
 
 import java.nio.ByteBuffer;
 import kafka.message.Message;
@@ -24,28 +24,26 @@
 /**
  * @since 2.1.0
  */
-  public class KafkaSinglePortByteArrayInputOperator extends AbstractKafkaSinglePortInputOperator<byte[]>
-  {
-
+public class KafkaSinglePortByteArrayInputOperator extends AbstractKafkaSinglePortInputOperator<byte[]>
+{
   /**
    * Implement abstract method of AbstractKafkaSinglePortInputOperator
    *
    * @param message
    * @return byte array
    */
-    @Override
-    public byte[] getTuple(Message message)
-    {
-      byte[] bytes = null;
-      try {
-        ByteBuffer buffer = message.payload();
-        bytes = new byte[buffer.remaining()];
-        buffer.get(bytes);
-      }
-      catch (Exception ex) {
-        return bytes;
-      }
+  @Override
+  public byte[] getTuple(Message message)
+  {
+    byte[] bytes = null;
+    try {
+      ByteBuffer buffer = message.payload();
+      bytes = new byte[buffer.remaining()];
+      buffer.get(bytes);
+    } catch (Exception ex) {
       return bytes;
     }
-
+    return bytes;
   }
+
+}
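
The un-indented getTuple() above uses the standard ByteBuffer idiom: size the array from remaining() and bulk-get, so only the payload is copied rather than the buffer's whole backing array. The idiom on its own, pure JDK:

import java.nio.ByteBuffer;

public class PayloadCopySketch
{
  public static byte[] toBytes(ByteBuffer buffer)
  {
    byte[] bytes = new byte[buffer.remaining()];
    buffer.get(bytes); // advances position; copies the payload only
    return bytes;
  }

  public static void main(String[] args)
  {
    ByteBuffer b = ByteBuffer.wrap("hello".getBytes());
    System.out.println(new String(toBytes(b)));
  }
}
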
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaSinglePortOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaSinglePortOutputOperator.java
index cc1f2d4..5035698 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaSinglePortOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaSinglePortOutputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kafka;
+package org.apache.apex.malhar.contrib.kafka;
 
 import com.datatorrent.api.DefaultInputPort;
 import kafka.producer.KeyedMessage;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaSinglePortStringInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaSinglePortStringInputOperator.java
index 9728dc9..4bbbafa 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaSinglePortStringInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/KafkaSinglePortStringInputOperator.java
@@ -16,11 +16,10 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kafka;
-
-import kafka.message.Message;
+package org.apache.apex.malhar.contrib.kafka;
 
 import java.nio.ByteBuffer;
+import kafka.message.Message;
 
 /**
  * Kafka input adapter operator with a single output port, which consumes String data from the Kafka message bus.
@@ -48,8 +47,7 @@
       buffer.get(bytes);
       data = new String(bytes);
       //logger.debug("Consuming {}", data);
-    }
-    catch (Exception ex) {
+    } catch (Exception ex) {
       return data;
     }
     return data;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/OffsetManager.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/OffsetManager.java
index 0dee11e..602e905 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/OffsetManager.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/OffsetManager.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kafka;
+package org.apache.apex.malhar.contrib.kafka;
 
 import java.util.Map;
 
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/POJOKafkaOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/POJOKafkaOutputOperator.java
index 52c253c..b3be716 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/POJOKafkaOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/POJOKafkaOutputOperator.java
@@ -16,16 +16,17 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kafka;
+package org.apache.apex.malhar.contrib.kafka;
 
 import java.lang.reflect.Field;
 import javax.validation.constraints.Min;
 import javax.validation.constraints.NotNull;
+
+import org.apache.apex.malhar.lib.util.PojoUtils;
 import org.apache.commons.lang3.ClassUtils;
 import com.datatorrent.api.AutoMetric;
 import com.datatorrent.api.Context;
 import com.datatorrent.api.DefaultInputPort;
-import com.datatorrent.lib.util.PojoUtils;
 
 import kafka.producer.KeyedMessage;
 import kafka.producer.ProducerConfig;
@@ -78,7 +79,8 @@
   protected transient PojoUtils.Getter keyMethod;
   protected transient Class<?> pojoClass;
 
-  public final transient DefaultInputPort<Object> inputPort = new DefaultInputPort<Object>() {
+  public final transient DefaultInputPort<Object> inputPort = new DefaultInputPort<Object>()
+  {
     @Override
     public void setup(Context.PortContext context)
     {
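
The input port above builds a PojoUtils getter once in the port's setup() and reuses it for every tuple. For contrast, a reduced sketch of the naive alternative it avoids: per-tuple reflection (names here are illustrative only):

```java
import java.lang.reflect.Method;

// Illustrative only: per-tuple reflection to read a key field from a POJO.
// POJOKafkaOutputOperator avoids this per-tuple cost by compiling a
// PojoUtils getter once when the port context becomes available.
public final class KeyFieldDemo
{
  static Object readKey(Object pojo, String fieldName) throws Exception
  {
    String getterName = "get" + Character.toUpperCase(fieldName.charAt(0)) + fieldName.substring(1);
    Method getter = pojo.getClass().getMethod(getterName);
    return getter.invoke(pojo);
  }
}
```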
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/SimpleKafkaConsumer.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/SimpleKafkaConsumer.java
index 4db1d69..c0e059a 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/SimpleKafkaConsumer.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/SimpleKafkaConsumer.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kafka;
+package org.apache.apex.malhar.contrib.kafka;
 
 import java.util.Collections;
 import java.util.HashMap;
@@ -37,10 +37,11 @@
 
 import javax.validation.constraints.NotNull;
 
-import org.apache.commons.collections.*;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.commons.collections.CollectionUtils;
+
 import com.google.common.collect.HashMultimap;
 import com.google.common.collect.SetMultimap;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -138,59 +139,58 @@
         // stop consuming only when the consumer container is stopped or the metadata can not be refreshed
         while (consumer.isAlive && (consumer.metadataRefreshRetryLimit == -1 || consumer.retryCounter.get() < consumer.metadataRefreshRetryLimit)) {
 
-            if (kpS == null || kpS.isEmpty()) {
-              return;
-            }
+          if (kpS == null || kpS.isEmpty()) {
+            return;
+          }
 
-            FetchRequestBuilder frb = new FetchRequestBuilder().clientId(clientName);
-            // add all partition request in one Fretch request together
-            for (KafkaPartition kpForConsumer : kpS) {
-              frb.addFetch(consumer.topic, kpForConsumer.getPartitionId(), consumer.offsetTrack.get(kpForConsumer), consumer.bufferSize);
-            }
+          FetchRequestBuilder frb = new FetchRequestBuilder().clientId(clientName);
+          // add all partition requests in one Fetch request together
+          for (KafkaPartition kpForConsumer : kpS) {
+            frb.addFetch(consumer.topic, kpForConsumer.getPartitionId(), consumer.offsetTrack.get(kpForConsumer), consumer.bufferSize);
+          }
 
-            FetchRequest req = frb.build();
-            if (ksc == null) {
-              if (consumer.metadataRefreshInterval > 0) {
-                Thread.sleep(consumer.metadataRefreshInterval + 1000);
-              } else {
-                Thread.sleep(100);
-              }
+          FetchRequest req = frb.build();
+          if (ksc == null) {
+            if (consumer.metadataRefreshInterval > 0) {
+              Thread.sleep(consumer.metadataRefreshInterval + 1000);
+            } else {
+              Thread.sleep(100);
             }
-            FetchResponse fetchResponse = ksc.fetch(req);
-            for (Iterator<KafkaPartition> iterator = kpS.iterator(); iterator.hasNext();) {
-              KafkaPartition kafkaPartition = iterator.next();
-              short errorCode = fetchResponse.errorCode(consumer.topic, kafkaPartition.getPartitionId());
-              if (fetchResponse.hasError() && errorCode != ErrorMapping.NoError()) {
-                // Kick off partition(s) which has error when fetch from this broker temporarily
-                // Monitor will find out which broker it goes in monitor thread
-                logger.warn("Error when consuming topic {} from broker {} with error {} ", kafkaPartition, broker,
+          }
+          FetchResponse fetchResponse = ksc.fetch(req);
+          for (Iterator<KafkaPartition> iterator = kpS.iterator(); iterator.hasNext();) {
+            KafkaPartition kafkaPartition = iterator.next();
+            short errorCode = fetchResponse.errorCode(consumer.topic, kafkaPartition.getPartitionId());
+            if (fetchResponse.hasError() && errorCode != ErrorMapping.NoError()) {
+              // Temporarily kick out partition(s) that hit an error when fetching from this broker
+              // The monitor thread will find out which broker they move to
+              logger.warn("Error when consuming topic {} from broker {} with error {} ", kafkaPartition, broker,
                   ErrorMapping.exceptionFor(errorCode));
-                if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
-                  long seekTo = consumer.initialOffset.toLowerCase().equals("earliest") ? OffsetRequest.EarliestTime()
+              if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
+                long seekTo = consumer.initialOffset.toLowerCase().equals("earliest") ? OffsetRequest.EarliestTime()
                     : OffsetRequest.LatestTime();
-                  seekTo = KafkaMetadataUtil.getLastOffset(ksc, consumer.topic, kafkaPartition.getPartitionId(), seekTo, clientName);
-                  logger.warn("Offset out of range error, reset offset to {}", seekTo);
-                  consumer.offsetTrack.put(kafkaPartition, seekTo);
-                  continue;
-                }
-                iterator.remove();
-                consumer.partitionToBroker.remove(kafkaPartition);
-                consumer.stats.updatePartitionStats(kafkaPartition, -1, "");
+                seekTo = KafkaMetadataUtil.getLastOffset(ksc, consumer.topic, kafkaPartition.getPartitionId(), seekTo, clientName);
+                logger.warn("Offset out of range error, reset offset to {}", seekTo);
+                consumer.offsetTrack.put(kafkaPartition, seekTo);
                 continue;
               }
-              // If the fetchResponse either has no error or the no error for $kafkaPartition get the data
-              long offset = -1l;
-              for (MessageAndOffset msg : fetchResponse.messageSet(consumer.topic, kafkaPartition.getPartitionId())) {
-                offset = msg.nextOffset();
-                consumer.putMessage(kafkaPartition, msg.message(), msg.offset());
-              }
-              if (offset != -1) {
-                consumer.offsetTrack.put(kafkaPartition, offset);
-              }
-
+              iterator.remove();
+              consumer.partitionToBroker.remove(kafkaPartition);
+              consumer.stats.updatePartitionStats(kafkaPartition, -1, "");
+              continue;
             }
+            // If the fetchResponse has no error at all, or no error for this $kafkaPartition, get the data
+            long offset = -1L;
+            for (MessageAndOffset msg : fetchResponse.messageSet(consumer.topic, kafkaPartition.getPartitionId())) {
+              offset = msg.nextOffset();
+              consumer.putMessage(kafkaPartition, msg.message(), msg.offset());
+            }
+            if (offset != -1) {
+              consumer.offsetTrack.put(kafkaPartition, offset);
+            }
+          }
         }
-      } catch (Exception e){
+      } catch (Exception e) {
         logger.error("The consumer encounters an unrecoverable exception. Close the connection to broker {} \n Caused by {}", broker, e);
       } finally {
         if (ksc != null) {
@@ -304,7 +304,7 @@
   private Set<KafkaPartition> kps = new HashSet<KafkaPartition>();
 
   // This map maintains mapping between kafka partition and it's leader broker in realtime monitored by a thread
-  private transient final ConcurrentHashMap<KafkaPartition, Broker> partitionToBroker = new ConcurrentHashMap<KafkaPartition, Broker>();
+  private final transient ConcurrentHashMap<KafkaPartition, Broker> partitionToBroker = new ConcurrentHashMap<KafkaPartition, Broker>();
 
   /**
    * Track offset for each partition, so operator could start from the last serialized state Use ConcurrentHashMap to
@@ -326,7 +326,7 @@
     // thread to consume the kafka data
     kafkaConsumerExecutor = Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("kafka-consumer-" + topic + "-%d").build());
 
-    if(metadataRefreshInterval <= 0 || CollectionUtils.isEmpty(kps)) {
+    if (metadataRefreshInterval <= 0 || CollectionUtils.isEmpty(kps)) {
       return;
     }
 
@@ -334,7 +334,7 @@
     metadataRefreshExecutor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder().setNameFormat("kafka-consumer-monitor-" + topic + "-%d").setDaemon(true).build());
 
     // start one monitor thread to monitor the leader broker change and trigger some action
-    metadataRefreshExecutor.scheduleAtFixedRate(new MetaDataMonitorTask(this) , 0, metadataRefreshInterval, TimeUnit.MILLISECONDS);
+    metadataRefreshExecutor.scheduleAtFixedRate(new MetaDataMonitorTask(this), 0, metadataRefreshInterval, TimeUnit.MILLISECONDS);
   }
 
   @Override
@@ -462,17 +462,20 @@
    * and restart failed consumer threads for the partitions.
    * Monitoring is disabled after metadataRefreshRetryLimit number of failure.
    */
-  private class MetaDataMonitorTask implements Runnable {
-
+  private class MetaDataMonitorTask implements Runnable
+  {
     private final SimpleKafkaConsumer ref;
 
-    private transient final SetMultimap<Broker, KafkaPartition> deltaPositive = HashMultimap.create();
+    private final transient SetMultimap<Broker, KafkaPartition> deltaPositive = HashMultimap.create();
 
-    private MetaDataMonitorTask(SimpleKafkaConsumer ref) {
+    private MetaDataMonitorTask(SimpleKafkaConsumer ref)
+    {
       this.ref = ref;
     }
 
-    @Override public void run() {
+    @Override
+    public void run()
+    {
       try {
         monitorMetadata();
         monitorException.set(null);
@@ -499,8 +502,9 @@
         }
 
         for (Entry<String, List<PartitionMetadata>> pmLEntry : pms.entrySet()) {
-          if (pmLEntry.getValue() == null)
+          if (pmLEntry.getValue() == null) {
             continue;
+          }
           for (PartitionMetadata pm : pmLEntry.getValue()) {
             KafkaPartition kp = new KafkaPartition(pmLEntry.getKey(), topic, pm.partitionId());
             if (!kps.contains(kp)) {
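
The re-indented fetch loop above handles OffsetOutOfRangeCode by seeking to the earliest or latest available offset, per the consumer's configured initialOffset, and retrying the partition rather than dropping it. A reduced sketch of just that decision (names are illustrative):

```java
// Sketch of the offset-reset rule in SimpleKafkaConsumer's fetch loop:
// when the tracked offset falls outside the broker's retained range,
// restart from the earliest or latest retained offset.
public final class OffsetResetDemo
{
  static long chooseResetOffset(String initialOffset, long earliestAvailable, long latestAvailable)
  {
    return "earliest".equalsIgnoreCase(initialOffset) ? earliestAvailable : latestAvailable;
  }
}
```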
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/package-info.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/package-info.java
index 0fd936e..baafb13 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/package-info.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kafka/package-info.java
@@ -19,4 +19,4 @@
 /**
  * Kafka operators and utilities.
  */
-package com.datatorrent.contrib.kafka;
+package org.apache.apex.malhar.contrib.kafka;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/AbstractKinesisInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/AbstractKinesisInputOperator.java
index 8df3277..05ba455 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/AbstractKinesisInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/AbstractKinesisInputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kinesis;
+package org.apache.apex.malhar.contrib.kinesis;
 
 import java.io.IOException;
 import java.lang.reflect.Array;
@@ -36,6 +36,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.lib.util.KryoCloneUtils;
 import org.apache.apex.malhar.lib.wal.WindowDataManager;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.tuple.MutablePair;
@@ -56,7 +57,6 @@
 import com.datatorrent.api.StatsListener;
 import com.datatorrent.api.annotation.Stateless;
 import com.datatorrent.common.util.Pair;
-import com.datatorrent.lib.util.KryoCloneUtils;
 
 /**
  * Base implementation of Kinesis Input Operator. Fetches records from kinesis and emits them as tuples.<br/>
@@ -75,7 +75,7 @@
  * @since 2.0.0
  */
 @SuppressWarnings("rawtypes")
-public abstract class AbstractKinesisInputOperator <T> implements InputOperator, ActivationListener<OperatorContext>, Partitioner<AbstractKinesisInputOperator>, StatsListener,Operator.CheckpointNotificationListener
+public abstract class AbstractKinesisInputOperator<T> implements InputOperator, ActivationListener<OperatorContext>, Partitioner<AbstractKinesisInputOperator>, StatsListener, Operator.CheckpointNotificationListener
 {
   private static final Logger logger = LoggerFactory.getLogger(AbstractKinesisInputOperator.class);
 
@@ -141,6 +141,7 @@
     windowDataManager = new WindowDataManager.NoopWindowDataManager();
     currentWindowRecoveryState = new HashMap<String, MutablePair<String, Integer>>();
   }
+
   /**
    * Derived class has to implement this method, so that it knows what type of message it is going to send to Malhar.
    * It converts a ByteBuffer message into a Tuple. A Tuple can be of any type (derived from Java Object) that
@@ -170,11 +171,11 @@
   {
     boolean isInitialParitition = partitions.iterator().next().getStats() == null;
     // Set the credentials to get the list of shards
-    if(isInitialParitition) {
+    if (isInitialParitition) {
       try {
         KinesisUtil.getInstance().createKinesisClient(accessKey, secretKey, endPoint);
       } catch (Exception e) {
-         throw new RuntimeException("[definePartitions]: Unable to load credentials. ", e);
+        throw new RuntimeException("[definePartitions]: Unable to load credentials. ", e);
       }
     }
     List<Shard> shards = KinesisUtil.getInstance().getShardList(getStreamName());
@@ -185,89 +186,90 @@
 
     // initialize the shard positions
     Map<String, String> initShardPos = null;
-    if(isInitialParitition && shardManager !=null){
+    if (isInitialParitition && shardManager != null) {
       initShardPos = shardManager.loadInitialShardPositions();
     }
 
     switch (strategy) {
     // For the 1 to 1 mapping The framework will create number of operator partitions based on kinesis shards
     // Each operator partition will consume from only one kinesis shard
-    case ONE_TO_ONE:
-      if (isInitialParitition) {
-        lastRepartitionTime = System.currentTimeMillis();
-        logger.info("[ONE_TO_ONE]: Initializing partition(s)");
-        // initialize the number of operator partitions according to number of shards
-        newPartitions = new ArrayList<Partition<AbstractKinesisInputOperator>>(shards.size());
-        for (int i = 0; i < shards.size(); i++) {
-          logger.info("[ONE_TO_ONE]: Create operator partition for kinesis partition: " + shards.get(i).getShardId() + ", StreamName: " + this.getConsumer().streamName);
-          newPartitions.add(createPartition(Sets.newHashSet(shards.get(i).getShardId()), initShardPos));
+      case ONE_TO_ONE:
+        if (isInitialParitition) {
+          lastRepartitionTime = System.currentTimeMillis();
+          logger.info("[ONE_TO_ONE]: Initializing partition(s)");
+          // initialize the number of operator partitions according to number of shards
+          newPartitions = new ArrayList<Partition<AbstractKinesisInputOperator>>(shards.size());
+          for (int i = 0; i < shards.size(); i++) {
+            logger.info("[ONE_TO_ONE]: Create operator partition for kinesis partition: " + shards.get(i).getShardId() + ", StreamName: " + this.getConsumer().streamName);
+            newPartitions.add(createPartition(Sets.newHashSet(shards.get(i).getShardId()), initShardPos));
+          }
+        } else if (newWaitingPartition.size() != 0) {
+          // Remove the partitions for the closed shards
+          removePartitionsForClosedShards(partitions, deletedOperators);
+          // add partition for new kinesis shard
+          for (String pid : newWaitingPartition) {
+            logger.info("[ONE_TO_ONE]: Add operator partition for kinesis partition " + pid);
+            partitions.add(createPartition(Sets.newHashSet(pid), null));
+          }
+          newWaitingPartition.clear();
+          List<WindowDataManager> managers = windowDataManager.partition(partitions.size(), deletedOperators);
+          int i = 0;
+          for (Partition<AbstractKinesisInputOperator> partition : partitions) {
+            partition.getPartitionedInstance().setWindowDataManager(managers.get(i));
+            i++;
+          }
+          return partitions;
         }
-      } else if (newWaitingPartition.size() != 0) {
-        // Remove the partitions for the closed shards
-        removePartitionsForClosedShards(partitions, deletedOperators);
-        // add partition for new kinesis shard
-        for (String pid : newWaitingPartition) {
-          logger.info("[ONE_TO_ONE]: Add operator partition for kinesis partition " + pid);
-          partitions.add(createPartition(Sets.newHashSet(pid), null));
-        }
-        newWaitingPartition.clear();
-        List<WindowDataManager> managers = windowDataManager.partition(partitions.size(), deletedOperators);
-        int i = 0;
-        for (Partition<AbstractKinesisInputOperator> partition : partitions) {
-          partition.getPartitionedInstance().setWindowDataManager(managers.get(i));
-          i++;
-        }
-        return partitions;
-      }
-      break;
+        break;
     // For the N to 1 mapping The initial partition number is defined by stream application
     // Afterwards, the framework will dynamically adjust the partition
-    case MANY_TO_ONE:
+      case MANY_TO_ONE:
       /* This case was handled into two ways.
          1. Dynamic Partition: Number of DT partitions is depends on the number of open shards.
          2. Static Partition: Number of DT partitions is fixed, whether the number of shards are increased/decreased.
       */
-      int size = initialPartitionCount;
-      if (newWaitingPartition.size() != 0) {
-        // Get the list of open shards
-        shards = getOpenShards(partitions);
-        if (shardsPerPartition > 1)
-          size = (int)Math.ceil(shards.size() / (shardsPerPartition * 1.0));
-        initShardPos = shardManager.loadInitialShardPositions();
-      }
-      @SuppressWarnings("unchecked")
-      Set<String>[] pIds = (Set<String>[]) Array.newInstance((new HashSet<String>()).getClass(), size);
-
-      newPartitions = new ArrayList<Partition<AbstractKinesisInputOperator>>(size);
-      for (int i = 0; i < shards.size(); i++) {
-        Shard pm = shards.get(i);
-        if (pIds[i % size] == null) {
-          pIds[i % size] = new HashSet<String>();
+        int size = initialPartitionCount;
+        if (newWaitingPartition.size() != 0) {
+          // Get the list of open shards
+          shards = getOpenShards(partitions);
+          if (shardsPerPartition > 1) {
+            size = (int)Math.ceil(shards.size() / (shardsPerPartition * 1.0));
+          }
+          initShardPos = shardManager.loadInitialShardPositions();
         }
-        pIds[i % size].add(pm.getShardId());
-      }
-      if (isInitialParitition) {
-        lastRepartitionTime = System.currentTimeMillis();
-        logger.info("[MANY_TO_ONE]: Initializing partition(s)");
-      } else {
-        logger.info("[MANY_TO_ONE]: Add operator partition for kinesis partition(s): " + StringUtils.join(newWaitingPartition, ", ") + ", StreamName: " + this.getConsumer().streamName);
-        newWaitingPartition.clear();
-      }
-      // Add the existing partition Ids to the deleted operators
-      for (Partition<AbstractKinesisInputOperator> op : partitions) {
-        deletedOperators.add(op.getPartitionedInstance().operatorId);
-      }
+        @SuppressWarnings("unchecked")
+        Set<String>[] pIds = (Set<String>[])Array.newInstance((new HashSet<String>()).getClass(), size);
 
-      for (int i = 0; i < pIds.length; i++) {
-        logger.info("[MANY_TO_ONE]: Create operator partition for kinesis partition(s): " + StringUtils.join(pIds[i], ", ") + ", StreamName: " + this.getConsumer().streamName);
-
-        if (pIds[i] != null) {
-          newPartitions.add(createPartition(pIds[i], initShardPos));
+        newPartitions = new ArrayList<Partition<AbstractKinesisInputOperator>>(size);
+        for (int i = 0; i < shards.size(); i++) {
+          Shard pm = shards.get(i);
+          if (pIds[i % size] == null) {
+            pIds[i % size] = new HashSet<String>();
+          }
+          pIds[i % size].add(pm.getShardId());
         }
-      }
-      break;
-    default:
-      break;
+        if (isInitialParitition) {
+          lastRepartitionTime = System.currentTimeMillis();
+          logger.info("[MANY_TO_ONE]: Initializing partition(s)");
+        } else {
+          logger.info("[MANY_TO_ONE]: Add operator partition for kinesis partition(s): " + StringUtils.join(newWaitingPartition, ", ") + ", StreamName: " + this.getConsumer().streamName);
+          newWaitingPartition.clear();
+        }
+        // Add the existing partition Ids to the deleted operators
+        for (Partition<AbstractKinesisInputOperator> op : partitions) {
+          deletedOperators.add(op.getPartitionedInstance().operatorId);
+        }
+
+        for (int i = 0; i < pIds.length; i++) {
+          logger.info("[MANY_TO_ONE]: Create operator partition for kinesis partition(s): " + StringUtils.join(pIds[i], ", ") + ", StreamName: " + this.getConsumer().streamName);
+
+          if (pIds[i] != null) {
+            newPartitions.add(createPartition(pIds[i], initShardPos));
+          }
+        }
+        break;
+      default:
+        break;
     }
     int i = 0;
     List<WindowDataManager> managers = windowDataManager.partition(partitions.size(), deletedOperators);
@@ -300,7 +302,7 @@
     List<KinesisConsumer.KinesisShardStats> kmsList = new LinkedList<KinesisConsumer.KinesisShardStats>();
     for (Stats.OperatorStats os : stats.getLastWindowedStats()) {
       if (os != null && os.counters instanceof KinesisConsumer.KinesisShardStats) {
-        kmsList.add((KinesisConsumer.KinesisShardStats) os.counters);
+        kmsList.add((KinesisConsumer.KinesisShardStats)os.counters);
       }
     }
     return kmsList;
@@ -317,12 +319,12 @@
     }
     logger.debug("Use ShardManager to update the Shard Positions");
     updateShardPositions(kstats);
-    if(repartitionInterval < 0){
+    if (repartitionInterval < 0) {
       // if repartition is disabled
       return false;
     }
 
-    if(t - lastRepartitionTime < repartitionInterval) {
+    if (t - lastRepartitionTime < repartitionInterval) {
       // return false if it's still within repartitionInterval since last (re)partition
       return false;
     }
@@ -356,19 +358,15 @@
   private void removePartitionsForClosedShards(Collection<Partition<AbstractKinesisInputOperator>> partitions, Set<Integer> deletedOperators)
   {
     List<Partition<AbstractKinesisInputOperator>> closedPartitions = new ArrayList<Partition<AbstractKinesisInputOperator>>();
-    for(Partition<AbstractKinesisInputOperator> op : partitions)
-    {
-      if(op.getPartitionedInstance().getConsumer().getClosedShards().size() ==
-          op.getPartitionedInstance().getConsumer().getNumOfShards())
-      {
+    for (Partition<AbstractKinesisInputOperator> op : partitions) {
+      if (op.getPartitionedInstance().getConsumer().getClosedShards().size() ==
+          op.getPartitionedInstance().getConsumer().getNumOfShards()) {
         closedPartitions.add(op);
         deletedOperators.add(op.getPartitionedInstance().operatorId);
       }
     }
-    if(closedPartitions.size() != 0)
-    {
-      for(Partition<AbstractKinesisInputOperator> op : closedPartitions)
-      {
+    if (closedPartitions.size() != 0) {
+      for (Partition<AbstractKinesisInputOperator> op : closedPartitions) {
         partitions.remove(op);
       }
     }
@@ -378,22 +376,21 @@
   private List<Shard> getOpenShards(Collection<Partition<AbstractKinesisInputOperator>> partitions)
   {
     List<Shard> closedShards = new ArrayList<Shard>();
-    for(Partition<AbstractKinesisInputOperator> op : partitions)
-    {
+    for (Partition<AbstractKinesisInputOperator> op : partitions) {
       closedShards.addAll(op.getPartitionedInstance().getConsumer().getClosedShards());
     }
     List<Shard> shards = KinesisUtil.getInstance().getShardList(getStreamName());
     List<Shard> openShards = new ArrayList<Shard>();
     for (Shard shard :shards) {
-      if(!closedShards.contains(shard)) {
+      if (!closedShards.contains(shard)) {
         openShards.add(shard);
       }
     }
     return openShards;
   }
+
   // Create a new partition with the shardIds and initial shard positions
-  private
-  Partition<AbstractKinesisInputOperator> createPartition(Set<String> shardIds, Map<String, String> initShardPos)
+  private Partition<AbstractKinesisInputOperator> createPartition(Set<String> shardIds, Map<String, String> initShardPos)
   {
     Partition<AbstractKinesisInputOperator> p = new DefaultPartition<AbstractKinesisInputOperator>(KryoCloneUtils.cloneObject(this));
     p.getPartitionedInstance().getConsumer().setShardIds(shardIds);
@@ -416,8 +413,7 @@
     this.context = context;
     try {
       KinesisUtil.getInstance().createKinesisClient(accessKey, secretKey, endPoint);
-    } catch(Exception e)
-    {
+    } catch (Exception e) {
       throw new RuntimeException(e);
     }
     consumer.create();
@@ -468,8 +464,7 @@
             emitTuple(new Pair<String, Record>(rc.getKey(), record));
             shardPosition.put(rc.getKey(), record.getSequenceNumber());
           }
-        } catch(Exception e)
-        {
+        } catch (Exception e) {
           throw new RuntimeException(e);
         }
       }
@@ -485,11 +480,11 @@
         getConsumer().resetShardPositions(statsData);
         consumer.start();
       }
-    }
-    catch (IOException e) {
+    } catch (IOException e) {
       throw new RuntimeException("replay", e);
     }
   }
+
   /**
    * Implement Operator Interface.
    */
@@ -500,8 +495,7 @@
       context.setCounters(getConsumer().getConsumerStats(shardPosition));
       try {
         windowDataManager.save(currentWindowRecoveryState, currentWindowId);
-      }
-      catch (IOException e) {
+      } catch (IOException e) {
         throw new RuntimeException("saving recovery", e);
       }
     }
@@ -517,7 +511,7 @@
   {
     // If it is a replay state, don't start the consumer
     if (context.getValue(OperatorContext.ACTIVATION_WINDOW_ID) != Stateless.WINDOW_ID &&
-      context.getValue(OperatorContext.ACTIVATION_WINDOW_ID) < windowDataManager.getLargestCompletedWindow()) {
+        context.getValue(OperatorContext.ACTIVATION_WINDOW_ID) < windowDataManager.getLargestCompletedWindow()) {
       return;
     }
     consumer.start();
@@ -528,8 +522,7 @@
   {
     try {
       windowDataManager.committed(windowId);
-    }
-    catch (IOException e) {
+    } catch (IOException e) {
       throw new RuntimeException("deleting state", e);
     }
   }
@@ -563,8 +556,9 @@
       return;
     }
     int count = consumer.getQueueSize();
-    if(maxTuplesPerWindow > 0)
+    if (maxTuplesPerWindow > 0) {
       count = Math.min(count, maxTuplesPerWindow - emitCount);
+    }
     for (int i = 0; i < count; i++) {
       Pair<String, Record> data = consumer.pollRecord();
       String shardId = data.getFirst();
@@ -574,7 +568,7 @@
       if (shardOffsetAndCount == null) {
         currentWindowRecoveryState.put(shardId, new MutablePair<String, Integer>(recordId, 1));
       } else {
-        shardOffsetAndCount.setRight(shardOffsetAndCount.right+1);
+        shardOffsetAndCount.setRight(shardOffsetAndCount.right + 1);
       }
       shardPosition.put(shardId, recordId);
     }
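
The re-indented MANY_TO_ONE branch above spreads shards round-robin across a fixed number of operator partitions via the `pIds[i % size]` assignment, where size is either the configured initialPartitionCount or ceil(openShards / shardsPerPartition). A self-contained sketch of that bucketing (class and method names are illustrative):

```java
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Sketch of the MANY_TO_ONE shard bucketing: shard i lands in bucket
// i % size, so shards are distributed round-robin across partitions.
public final class ShardBucketsDemo
{
  static List<Set<String>> bucket(List<String> shardIds, int size)
  {
    List<Set<String>> buckets = new ArrayList<>(size);
    for (int i = 0; i < size; i++) {
      buckets.add(new HashSet<String>());
    }
    for (int i = 0; i < shardIds.size(); i++) {
      buckets.get(i % size).add(shardIds.get(i));
    }
    return buckets;
  }
}
```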
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/AbstractKinesisOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/AbstractKinesisOutputOperator.java
index d6f9d36..6563fc4 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/AbstractKinesisOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/AbstractKinesisOutputOperator.java
@@ -16,9 +16,21 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kinesis;
+package org.apache.apex.malhar.contrib.kinesis;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.validation.constraints.Max;
+import javax.validation.constraints.Min;
+import javax.validation.constraints.NotNull;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.amazonaws.AmazonClientException;
+
 import com.amazonaws.services.kinesis.AmazonKinesisClient;
 import com.amazonaws.services.kinesis.model.PutRecordRequest;
 import com.amazonaws.services.kinesis.model.PutRecordsRequest;
@@ -28,16 +40,6 @@
 import com.datatorrent.api.Operator;
 import com.datatorrent.common.util.Pair;
 
-import javax.validation.constraints.Max;
-import javax.validation.constraints.Min;
-import javax.validation.constraints.NotNull;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
 /**
  * Base implementation of Kinesis Output Operator. Convert tuples to records and emits to Kinesis.<br/>
  *
@@ -92,7 +94,8 @@
   }
 
   @Override
-  public void endWindow() {
+  public void endWindow()
+  {
   }
 
   /**
@@ -138,13 +141,11 @@
   {
     try {
       KinesisUtil.getInstance().createKinesisClient(accessKey, secretKey, endPoint);
-    } catch(Exception e)
-    {
+    } catch (Exception e) {
       throw new RuntimeException("Unable to load Credentials", e);
     }
     this.setClient(KinesisUtil.getInstance().getClient());
-    if(isBatchProcessing)
-    {
+    if (isBatchProcessing) {
       putRecordsRequestEntryList.clear();
     }
   }
@@ -166,24 +167,19 @@
   {
     // Send out single data
     try {
-      if(isBatchProcessing)
-      {
-        if(putRecordsRequestEntryList.size() == batchSize)
-        {
+      if (isBatchProcessing) {
+        if (putRecordsRequestEntryList.size() == batchSize) {
           flushRecords();
           logger.debug( "flushed {} records.", batchSize );
         }
         addRecord(tuple);
-
       } else {
         Pair<String, V> keyValue = tupleToKeyValue(tuple);
         PutRecordRequest requestRecord = new PutRecordRequest();
         requestRecord.setStreamName(streamName);
         requestRecord.setPartitionKey(keyValue.first);
         requestRecord.setData(ByteBuffer.wrap(getRecord(keyValue.second)));
-
         client.putRecord(requestRecord);
-
       }
       sendCount++;
     } catch (AmazonClientException e) {
@@ -219,6 +215,7 @@
       throw new RuntimeException(e);
     }
   }
+
   public void setClient(AmazonKinesisClient _client)
   {
     client = _client;
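
The reformatted batch path above flushes the pending record list once it reaches batchSize and only then appends the new tuple, so a full batch can sit one record short until the next tuple arrives. A reduced, generic sketch of that rule (the Kinesis PutRecordsRequest call is elided; all names are illustrative):

```java
import java.util.ArrayList;
import java.util.List;

// Sketch of the flush-before-add batching rule used by the output operator.
public final class BatchDemo<T>
{
  private final List<T> pending = new ArrayList<>();
  private final int batchSize;

  public BatchDemo(int batchSize)
  {
    this.batchSize = batchSize;
  }

  void add(T record)
  {
    if (pending.size() == batchSize) {
      flush();
    }
    pending.add(record);
  }

  void flush()
  {
    // in the operator this issues a PutRecordsRequest to Kinesis
    pending.clear();
  }
}
```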
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisByteArrayInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisByteArrayInputOperator.java
index ed05a54..1213b02 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisByteArrayInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisByteArrayInputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kinesis;
+package org.apache.apex.malhar.contrib.kinesis;
 
 import java.nio.ByteBuffer;
 
@@ -44,8 +44,7 @@
       byte[] bytes = new byte[bb.remaining()];
       bb.get(bytes);
       return bytes;
-    }
-    catch (Exception ex) {
+    } catch (Exception ex) {
       throw new RuntimeException(ex);
     }
   }
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisByteArrayOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisByteArrayOutputOperator.java
index 3da2eb7..7c6e54a 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisByteArrayOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisByteArrayOutputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kinesis;
+package org.apache.apex.malhar.contrib.kinesis;
 
 import com.datatorrent.common.util.Pair;
 
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisConsumer.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisConsumer.java
index 0bab4d3..f7e4936 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisConsumer.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisConsumer.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kinesis;
+package org.apache.apex.malhar.contrib.kinesis;
 
 import java.io.Closeable;
 import java.io.Serializable;
@@ -34,16 +34,16 @@
 import javax.validation.constraints.Pattern;
 import javax.validation.constraints.Pattern.Flag;
 
-import com.amazonaws.services.kinesis.model.Record;
-import com.amazonaws.services.kinesis.model.Shard;
-import com.amazonaws.services.kinesis.model.ShardIteratorType;
-import com.google.common.collect.Maps;
-
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.commons.io.IOUtils;
 
+import com.amazonaws.services.kinesis.model.Record;
+import com.amazonaws.services.kinesis.model.Shard;
+import com.amazonaws.services.kinesis.model.ShardIteratorType;
+import com.google.common.collect.Maps;
+
 import com.datatorrent.common.util.Pair;
 
 /**
@@ -76,7 +76,7 @@
    * Latest means the current record to consume from the shard
    * By default it always consume from the beginning of the shard
    */
-  @Pattern(flags={Flag.CASE_INSENSITIVE}, regexp = "earliest|latest")
+  @Pattern(flags = {Flag.CASE_INSENSITIVE}, regexp = "earliest|latest")
   protected String initialOffset = "latest";
 
   protected transient ExecutorService consumerThreadExecutor = null;
@@ -104,15 +104,17 @@
     this(streamName);
     shardIds = newShardIds;
   }
+
   /**
    * This method is called in setup method of the operator
    */
-  public void create(){
+  public void create()
+  {
     holdingBuffer = new ArrayBlockingQueue<Pair<String, Record>>(bufferSize);
     boolean defaultSelect = (shardIds == null) || (shardIds.size() == 0);
     final List<Shard> pms = KinesisUtil.getInstance().getShardList(streamName);
     for (final Shard shId: pms) {
-      if((shardIds.contains(shId.getShardId()) || defaultSelect) && !closedShards.contains(shId)) {
+      if ((shardIds.contains(shId.getShardId()) || defaultSelect) && !closedShards.contains(shId)) {
         simpleConsumerThreads.add(shId);
       }
     }
@@ -123,7 +125,7 @@
    */
   public ShardIteratorType getIteratorType(String shardId)
   {
-    if(shardPosition.containsKey(shardId)) {
+    if (shardPosition.containsKey(shardId)) {
       return ShardIteratorType.AFTER_SEQUENCE_NUMBER;
     }
     return initialOffset.equalsIgnoreCase("earliest") ? ShardIteratorType.TRIM_HORIZON : ShardIteratorType.LATEST;
@@ -132,15 +134,18 @@
   /**
    * This method is called in the activate method of the operator
    */
-  public void start(){
+  public void start()
+  {
     isAlive = true;
     int realNumStream =  simpleConsumerThreads.size();
-    if(realNumStream == 0)
+    if (realNumStream == 0) {
       return;
+    }
 
     consumerThreadExecutor = Executors.newFixedThreadPool(realNumStream);
     for (final Shard shd : simpleConsumerThreads) {
-      consumerThreadExecutor.submit(new Runnable() {
+      consumerThreadExecutor.submit(new Runnable()
+      {
         @Override
         public void run()
         {
@@ -164,13 +169,12 @@
               } else {
                 String seqNo = "";
                 for (Record rc : records) {
-                    seqNo = rc.getSequenceNumber();
-                    putRecord(shd.getShardId(), rc);
+                  seqNo = rc.getSequenceNumber();
+                  putRecord(shd.getShardId(), rc);
                 }
                 shardPosition.put(shard.getShardId(), seqNo);
               }
-            } catch(Exception e)
-            {
+            } catch (Exception e) {
               throw new RuntimeException(e);
             }
           }
@@ -183,7 +187,7 @@
   @Override
   public void close()
   {
-    if(consumerThreadExecutor!=null) {
+    if (consumerThreadExecutor != null) {
       consumerThreadExecutor.shutdown();
     }
     simpleConsumerThreads.clear();
@@ -192,15 +196,16 @@
   /**
    * The method is called in the deactivate method of the operator
    */
-  public void stop() {
+  public void stop()
+  {
     isAlive = false;
     holdingBuffer.clear();
     IOUtils.closeQuietly(this);
   }
 
-  public void resetShardPositions(Map<String, String> shardPositions){
-
-    if(shardPositions == null){
+  public void resetShardPositions(Map<String, String> shardPositions)
+  {
+    if (shardPositions == null) {
       return;
     }
     shardPosition.clear();
@@ -233,6 +238,7 @@
     stats.updateShardStats(shardStats);
     return stats;
   }
+
   /**
    * This method is called in teardown method of the operator
    */
@@ -281,9 +287,10 @@
     return initialOffset;
   }
 
-  final protected void putRecord(String shardId, Record msg) throws InterruptedException{
+  protected final void putRecord(String shardId, Record msg) throws InterruptedException
+  {
     holdingBuffer.put(new Pair<String, Record>(shardId, msg));
-  };
+  }
 
   public Integer getRecordsLimit()
   {
@@ -322,15 +329,18 @@
     public KinesisShardStats()
     {
     }
+
     //important API for update
-    public void updateShardStats(Map<String, String> shardStats){
+    public void updateShardStats(Map<String, String> shardStats)
+    {
       for (Entry<String, String> ss : shardStats.entrySet()) {
         partitionStats.put(ss.getKey(), ss.getValue());
       }
     }
   }
 
-  public static class KinesisShardStatsUtil {
+  public static class KinesisShardStatsUtil
+  {
     public static Map<String, String> getShardStatsForPartitions(List<KinesisShardStats> kinesisshardStats)
     {
       Map<String, String> result = Maps.newHashMap();
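
The getIteratorType() method reformatted above resumes with AFTER_SEQUENCE_NUMBER whenever a checkpointed position exists for the shard, and otherwise falls back to TRIM_HORIZON ("earliest") or LATEST based on initialOffset. A standalone restatement of that logic (class and method names are illustrative):

```java
import java.util.Map;

import com.amazonaws.services.kinesis.model.ShardIteratorType;

// Sketch of KinesisConsumer's iterator-type selection.
public final class IteratorTypeDemo
{
  static ShardIteratorType pick(Map<String, String> shardPosition, String shardId, String initialOffset)
  {
    if (shardPosition.containsKey(shardId)) {
      // resume right after the last recorded sequence number
      return ShardIteratorType.AFTER_SEQUENCE_NUMBER;
    }
    return "earliest".equalsIgnoreCase(initialOffset)
        ? ShardIteratorType.TRIM_HORIZON : ShardIteratorType.LATEST;
  }
}
```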
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisStringInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisStringInputOperator.java
index 0a067bf..958cc5c 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisStringInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisStringInputOperator.java
@@ -16,11 +16,10 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kinesis;
-
-import com.amazonaws.services.kinesis.model.Record;
+package org.apache.apex.malhar.contrib.kinesis;
 
 import java.nio.ByteBuffer;
+import com.amazonaws.services.kinesis.model.Record;
 
 /**
  * Kinesis input adapter which consumes string data from Kinesis
@@ -41,8 +40,7 @@
       byte[] bytes = new byte[bb.remaining() ];
       bb.get(bytes);
       return new String(bytes);
-    }
-    catch (Exception ex) {
+    } catch (Exception ex) {
       throw new RuntimeException(ex);
     }
   }
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisStringOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisStringOutputOperator.java
index ab6c6f8..5e8dd8d 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisStringOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisStringOutputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kinesis;
+package org.apache.apex.malhar.contrib.kinesis;
 
 import com.datatorrent.common.util.Pair;
 
@@ -33,6 +33,7 @@
   {
     return tuple.getBytes();
   }
+
   @Override
   protected Pair<String, String> tupleToKeyValue(String tuple)
   {
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisUtil.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisUtil.java
index 65bd75d..d4ba51c 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisUtil.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/KinesisUtil.java
@@ -16,14 +16,21 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kinesis;
+package org.apache.apex.malhar.contrib.kinesis;
 
+import java.util.List;
 import com.amazonaws.AmazonClientException;
 import com.amazonaws.auth.BasicAWSCredentials;
 import com.amazonaws.services.kinesis.AmazonKinesisClient;
-import com.amazonaws.services.kinesis.model.*;
-
-import java.util.List;
+import com.amazonaws.services.kinesis.model.DescribeStreamRequest;
+import com.amazonaws.services.kinesis.model.DescribeStreamResult;
+import com.amazonaws.services.kinesis.model.GetRecordsRequest;
+import com.amazonaws.services.kinesis.model.GetRecordsResult;
+import com.amazonaws.services.kinesis.model.GetShardIteratorRequest;
+import com.amazonaws.services.kinesis.model.GetShardIteratorResult;
+import com.amazonaws.services.kinesis.model.Record;
+import com.amazonaws.services.kinesis.model.Shard;
+import com.amazonaws.services.kinesis.model.ShardIteratorType;
 
 /**
  * A util class for Amazon Kinesis. Contains the wrappers for creating client, get the shard list and records from shard.
@@ -41,10 +48,9 @@
   {
   }
 
- public static KinesisUtil getInstance()
+  public static KinesisUtil getInstance()
   {
-    if(instance == null)
-    {
+    if (instance == null) {
       instance = new KinesisUtil();
     }
     return instance;
@@ -58,15 +64,13 @@
    */
   public void createKinesisClient(String accessKey, String secretKey, String endPoint) throws Exception
   {
-    if(client == null) {
+    if (client == null) {
       try {
         client = new AmazonKinesisClient(new BasicAWSCredentials(accessKey, secretKey));
-        if(endPoint != null)
-        {
+        if (endPoint != null) {
           client.setEndpoint(endPoint);
         }
-      } catch(Exception e)
-      {
+      } catch (Exception e) {
         throw new AmazonClientException("Unable to load credentials", e);
       }
     }
@@ -79,7 +83,7 @@
    */
   public List<Shard> getShardList(String streamName)
   {
-    assert client != null:"Illegal client";
+    assert client != null : "Illegal client";
     DescribeStreamRequest describeRequest = new DescribeStreamRequest();
     describeRequest.setStreamName(streamName);
 
@@ -98,9 +102,9 @@
    * @throws AmazonClientException
    */
   public List<Record> getRecords(String streamName, Integer recordsLimit, String shId, ShardIteratorType iteratorType, String seqNo)
-      throws AmazonClientException
+    throws AmazonClientException
   {
-    assert client != null:"Illegal client";
+    assert client != null : "Illegal client";
     try {
       // Create the GetShardIteratorRequest instance and sets streamName, shardId and iteratorType to it
       GetShardIteratorRequest iteratorRequest = new GetShardIteratorRequest();
@@ -110,9 +114,9 @@
 
       // If the iteratorType is AFTER_SEQUENCE_NUMBER, set the sequence No to the iteratorRequest
       if (ShardIteratorType.AFTER_SEQUENCE_NUMBER.equals(iteratorType) ||
-          ShardIteratorType.AT_SEQUENCE_NUMBER.equals(iteratorType))
+          ShardIteratorType.AT_SEQUENCE_NUMBER.equals(iteratorType)) {
         iteratorRequest.setStartingSequenceNumber(seqNo);
-
+      }
       // Get the Response from the getShardIterator service method & get the shardIterator from that response
       GetShardIteratorResult iteratorResponse = client.getShardIterator(iteratorRequest);
       // getShardIterator() specifies the position in the shard
@@ -126,8 +130,7 @@
       // Get the Response from the getRecords service method and get the data records from that response.
       GetRecordsResult getResponse = client.getRecords(getRequest);
       return getResponse.getRecords();
-    } catch(AmazonClientException e)
-    {
+    } catch (AmazonClientException e) {
       throw new RuntimeException(e);
     }
   }
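
A hypothetical caller for the util above, under the signatures shown in this diff (the stream name, credentials, and null endpoint are placeholders): create the client once, then read a page of records per shard.

```java
import java.util.List;

import org.apache.apex.malhar.contrib.kinesis.KinesisUtil;

import com.amazonaws.services.kinesis.model.Record;
import com.amazonaws.services.kinesis.model.Shard;
import com.amazonaws.services.kinesis.model.ShardIteratorType;

// Sketch: list the stream's shards and fetch up to 100 records from the
// start of each shard via the singleton util.
public class KinesisReadDemo
{
  public static void main(String[] args) throws Exception
  {
    KinesisUtil util = KinesisUtil.getInstance();
    util.createKinesisClient("accessKey", "secretKey", null);
    for (Shard shard : util.getShardList("demo-stream")) {
      List<Record> records = util.getRecords("demo-stream", 100,
          shard.getShardId(), ShardIteratorType.TRIM_HORIZON, null);
      System.out.println(shard.getShardId() + " -> " + records.size() + " records");
    }
  }
}
```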
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/ShardManager.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/ShardManager.java
index 94ce28d..cb71a50 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/ShardManager.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/ShardManager.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.kinesis;
+package org.apache.apex.malhar.contrib.kinesis;
 
 import java.util.HashMap;
 import java.util.Map;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/package-info.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/package-info.java
index 72a273c..62d820f 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/package-info.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/kinesis/package-info.java
@@ -17,4 +17,4 @@
  * under the License.
  */
 @org.apache.hadoop.classification.InterfaceStability.Evolving
-package com.datatorrent.contrib.kinesis;
+package org.apache.apex.malhar.contrib.kinesis;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache/AbstractMemcacheInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache/AbstractMemcacheInputOperator.java
index 77377ac..f27c3ca 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache/AbstractMemcacheInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache/AbstractMemcacheInputOperator.java
@@ -16,9 +16,9 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.memcache;
+package org.apache.apex.malhar.contrib.memcache;
 
-import com.datatorrent.lib.db.AbstractKeyValueStoreInputOperator;
+import org.apache.apex.malhar.lib.db.AbstractKeyValueStoreInputOperator;
 
 /**
  * This is the base implementation used for memcached input adapters.&nbsp;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache/AbstractMemcacheOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache/AbstractMemcacheOutputOperator.java
index eadd340..4f4dbe8 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache/AbstractMemcacheOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache/AbstractMemcacheOutputOperator.java
@@ -16,9 +16,9 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.memcache;
+package org.apache.apex.malhar.contrib.memcache;
 
-import com.datatorrent.lib.db.AbstractStoreOutputOperator;
+import org.apache.apex.malhar.lib.db.AbstractStoreOutputOperator;
 
 /**
  * This is the base implementation of memcached output operators.&nbsp;
@@ -31,8 +31,7 @@
  * @param <T> The tuple type.
  * @since 0.9.3
  */
-public abstract class AbstractMemcacheOutputOperator<T>
-        extends AbstractStoreOutputOperator<T, MemcacheStore>
+public abstract class AbstractMemcacheOutputOperator<T> extends AbstractStoreOutputOperator<T, MemcacheStore>
 {
   public AbstractMemcacheOutputOperator()
   {
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache/MemcachePOJOOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache/MemcachePOJOOutputOperator.java
index fdee9d0..45fc118 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache/MemcachePOJOOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache/MemcachePOJOOutputOperator.java
@@ -16,18 +16,17 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.memcache;
+package org.apache.apex.malhar.contrib.memcache;
 
 import java.util.List;
 
+import org.apache.apex.malhar.lib.util.FieldInfo;
+import org.apache.apex.malhar.lib.util.FieldValueGenerator;
+import org.apache.apex.malhar.lib.util.PojoUtils;
+import org.apache.apex.malhar.lib.util.PojoUtils.Getter;
+import org.apache.apex.malhar.lib.util.TableInfo;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 
-import com.datatorrent.lib.util.FieldInfo;
-import com.datatorrent.lib.util.FieldValueGenerator;
-import com.datatorrent.lib.util.PojoUtils;
-import com.datatorrent.lib.util.PojoUtils.Getter;
-import com.datatorrent.lib.util.TableInfo;
-
 /**
  *
  * @displayName Memcache Output Operator
@@ -36,7 +35,7 @@
  * @since 3.0.0
  */
 @Evolving
-public class MemcachePOJOOutputOperator extends AbstractMemcacheOutputOperator< Object >
+public class MemcachePOJOOutputOperator extends AbstractMemcacheOutputOperator<Object>
 {
   private static final long serialVersionUID = 5290158463990158290L;
   private TableInfo<FieldInfo> tableInfo;
@@ -53,8 +52,7 @@
 
     final List<FieldInfo> fieldsInfo = tableInfo.getFieldsInfo();
     Object value = tuple;
-    if( fieldsInfo != null )
-    {
+    if (fieldsInfo != null) {
       if (fieldValueGenerator == null) {
         fieldValueGenerator = FieldValueGenerator.getFieldValueGenerator(tuple.getClass(), fieldsInfo);
       }
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache/MemcacheStore.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache/MemcacheStore.java
index 1465f03..760c512 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache/MemcacheStore.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache/MemcacheStore.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.memcache;
+package org.apache.apex.malhar.contrib.memcache;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -25,11 +25,11 @@
 import java.util.Map;
 import java.util.concurrent.Future;
 
-import net.spy.memcached.MemcachedClient;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.apache.apex.malhar.lib.db.KeyValueStore;
 
-import com.datatorrent.lib.db.KeyValueStore;
+import net.spy.memcached.MemcachedClient;
 
 /**
  * Provides the implementation of a Memcache store.
@@ -91,8 +91,7 @@
   {
     if (serverAddresses.isEmpty()) {
       memcacheClient = new MemcachedClient(new InetSocketAddress("localhost", 11211));
-    }
-    else {
+    } else {
       memcacheClient = new MemcachedClient(serverAddresses);
     }
   }
@@ -140,14 +139,12 @@
     return results;
   }
 
-  @SuppressWarnings("unchecked")
   @Override
   public void put(Object key, Object value)
   {
     try {
       memcacheClient.set(key.toString(), keyExpiryTime, value).get();
-    }
-    catch (Exception ex) {
+    } catch (Exception ex) {
       throw new RuntimeException(ex);
     }
   }
@@ -162,8 +159,7 @@
     for (Future<?> future : futures) {
       try {
         future.get();
-      }
-      catch (Exception ex) {
+      } catch (Exception ex) {
         throw new RuntimeException(ex);
       }
     }
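
The put() path reformatted above calls spymemcached's asynchronous set() and then blocks on the returned future, which is what makes write failures surface immediately as exceptions. A minimal sketch of that pattern against a local memcached (host, port, and key are assumptions):

```java
import java.net.InetSocketAddress;

import net.spy.memcached.MemcachedClient;

// Sketch: set() is asynchronous; calling get() on its future makes the
// write synchronous, mirroring MemcacheStore.put() above.
public class SpyMemcacheDemo
{
  public static void main(String[] args) throws Exception
  {
    MemcachedClient client = new MemcachedClient(new InetSocketAddress("localhost", 11211));
    client.set("greeting", 0, "hello").get(); // 0 = no expiry
    System.out.println(client.get("greeting")); // hello
    client.shutdown();
  }
}
```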
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache/package-info.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache/package-info.java
index d1776dd..8132ea3 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache/package-info.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache/package-info.java
@@ -20,4 +20,4 @@
  * Memcache operators and utilities.
  */
 @org.apache.hadoop.classification.InterfaceStability.Evolving
-package com.datatorrent.contrib.memcache;
+package org.apache.apex.malhar.contrib.memcache;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache_whalin/AbstractMemcacheInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache_whalin/AbstractMemcacheInputOperator.java
index 116368f..3887e09 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache_whalin/AbstractMemcacheInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache_whalin/AbstractMemcacheInputOperator.java
@@ -16,9 +16,9 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.memcache_whalin;
+package org.apache.apex.malhar.contrib.memcache_whalin;
 
-import com.datatorrent.lib.db.AbstractKeyValueStoreInputOperator;
+import org.apache.apex.malhar.lib.db.AbstractKeyValueStoreInputOperator;
 
 /**
  * This is the base implementation used for memcached input adapters.&nbsp;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache_whalin/AbstractMemcacheOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache_whalin/AbstractMemcacheOutputOperator.java
index 010571d..9859f5a 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache_whalin/AbstractMemcacheOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache_whalin/AbstractMemcacheOutputOperator.java
@@ -16,9 +16,9 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.memcache_whalin;
+package org.apache.apex.malhar.contrib.memcache_whalin;
 
-import com.datatorrent.lib.db.AbstractStoreOutputOperator;
+import org.apache.apex.malhar.lib.db.AbstractStoreOutputOperator;
 
 /**
  * This is the base implementation of memcached output operators.&nbsp;
@@ -31,8 +31,7 @@
  * @param <T> The tuple type.
  * @since 0.9.3
  */
-public abstract class AbstractMemcacheOutputOperator<T>
-        extends AbstractStoreOutputOperator<T, MemcacheStore>
+public abstract class AbstractMemcacheOutputOperator<T> extends AbstractStoreOutputOperator<T, MemcacheStore>
 {
   public AbstractMemcacheOutputOperator()
   {
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache_whalin/MemcacheStore.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache_whalin/MemcacheStore.java
index 90d75bd..175e1c3 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache_whalin/MemcacheStore.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache_whalin/MemcacheStore.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.memcache_whalin;
+package org.apache.apex.malhar.contrib.memcache_whalin;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -27,11 +27,11 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.lib.db.KeyValueStore;
+
 import com.whalin.MemCached.MemCachedClient;
 import com.whalin.MemCached.SockIOPool;
 
-import com.datatorrent.lib.db.KeyValueStore;
-
 /**
  * Provides the implementation of a Memcache store.
  *
@@ -81,8 +81,7 @@
     pool = SockIOPool.getInstance();
     if (serverAddresses.isEmpty()) {
       pool.setServers(new String[]{"localhost:11211"});
-    }
-    else {
+    } else {
       pool.setServers(serverAddresses.toArray(new String[] {}));
     }
     pool.initialize();
@@ -121,7 +120,6 @@
    * @param keys
    * @return All values for the given keys.
    */
-  @SuppressWarnings("unchecked")
   @Override
   public List<Object> getAll(List<Object> keys)
   {
@@ -132,14 +130,12 @@
     return results;
   }
 
-  @SuppressWarnings("unchecked")
   @Override
   public void put(Object key, Object value)
   {
     try {
       memcacheClient.set(key.toString(), value, keyExpiryTime);
-    }
-    catch (Exception ex) {
+    } catch (Exception ex) {
       throw new RuntimeException(ex);
     }
   }
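The MemcacheStore hunk above is the clearest instance of the import regrouping this PR applies everywhere: org.apache.apex.malhar.* imports move out of the trailing com.datatorrent group and into the org.* block, so a file reads java/javax first, then org.*, then com.*, with a blank line between groups. A minimal sketch of the resulting layout, assuming that inferred ordering (the package, class, and members here are hypothetical, not part of the PR):

    package org.apache.apex.malhar.contrib.example;

    import java.util.List;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    import org.apache.apex.malhar.lib.db.KeyValueStore;

    import com.whalin.MemCached.MemCachedClient;

    public class ImportOrderSketch
    {
      private static final Logger LOG = LoggerFactory.getLogger(ImportOrderSketch.class);

      void report(List<Object> keys, KeyValueStore store, MemCachedClient client)
      {
        // Uses each import once so the sketch compiles without unused-import warnings.
        LOG.info("keys={}, store={}, client={}", keys.size(), store, client);
      }
    }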
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache_whalin/package-info.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache_whalin/package-info.java
index e5442d6..cc87b21 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache_whalin/package-info.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/memcache_whalin/package-info.java
@@ -20,4 +20,4 @@
  * Memcache operators and utilities using whalin library.
  */
 @org.apache.hadoop.classification.InterfaceStability.Evolving
-package com.datatorrent.contrib.memcache_whalin;
+package org.apache.apex.malhar.contrib.memcache_whalin;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/AbstractMemsqlInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/AbstractMemsqlInputOperator.java
index 05ce019..d1465b8 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/AbstractMemsqlInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/AbstractMemsqlInputOperator.java
@@ -16,9 +16,9 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.memsql;
+package org.apache.apex.malhar.contrib.memsql;
 
-import com.datatorrent.lib.db.jdbc.AbstractJdbcInputOperator;
+import org.apache.apex.malhar.lib.db.jdbc.AbstractJdbcInputOperator;
 
 /**
  * This is an input operator that reads from a memsql database.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/AbstractMemsqlOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/AbstractMemsqlOutputOperator.java
index f6391ab..6ddbaf8 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/AbstractMemsqlOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/AbstractMemsqlOutputOperator.java
@@ -16,9 +16,9 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.memsql;
+package org.apache.apex.malhar.contrib.memsql;
 
-import com.datatorrent.lib.db.jdbc.AbstractJdbcNonTransactionableBatchOutputOperator;
+import org.apache.apex.malhar.lib.db.jdbc.AbstractJdbcNonTransactionableBatchOutputOperator;
 
 /**
  * This is an output operator that connects to a memsql database.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/MemsqlPOJOInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/MemsqlPOJOInputOperator.java
index 2f75575..6d07228 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/MemsqlPOJOInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/MemsqlPOJOInputOperator.java
@@ -16,20 +16,15 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.memsql;
-
-import com.datatorrent.api.Context.OperatorContext;
-import com.datatorrent.lib.util.PojoUtils;
-import com.datatorrent.lib.util.PojoUtils.Setter;
-import com.datatorrent.lib.util.PojoUtils.SetterBoolean;
-import com.datatorrent.lib.util.PojoUtils.SetterDouble;
-import com.datatorrent.lib.util.PojoUtils.SetterFloat;
-import com.datatorrent.lib.util.PojoUtils.SetterInt;
-import com.datatorrent.lib.util.PojoUtils.SetterLong;
-import com.datatorrent.lib.util.PojoUtils.SetterShort;
+package org.apache.apex.malhar.contrib.memsql;
 
 import java.math.BigDecimal;
-import java.sql.*;
+import java.sql.Date;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.sql.Time;
+import java.sql.Timestamp;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -38,9 +33,18 @@
 import javax.validation.constraints.Min;
 import javax.validation.constraints.NotNull;
 
-import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.apache.apex.malhar.lib.util.PojoUtils;
+import org.apache.apex.malhar.lib.util.PojoUtils.Setter;
+import org.apache.apex.malhar.lib.util.PojoUtils.SetterBoolean;
+import org.apache.apex.malhar.lib.util.PojoUtils.SetterDouble;
+import org.apache.apex.malhar.lib.util.PojoUtils.SetterFloat;
+import org.apache.apex.malhar.lib.util.PojoUtils.SetterInt;
+import org.apache.apex.malhar.lib.util.PojoUtils.SetterLong;
+import org.apache.apex.malhar.lib.util.PojoUtils.SetterShort;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import com.datatorrent.api.Context.OperatorContext;
 
 /**
  * <p>
@@ -262,8 +266,7 @@
         String javaType;
         if (memsqlType.contains("(")) {
           javaType = memsqlType.substring(0, memsqlType.indexOf('(')).toUpperCase();
-        }
-        else {
+        } else {
           javaType = memsqlType.toUpperCase();
         }
 
@@ -286,16 +289,14 @@
       }
 
       statement.close();
-    }
-    catch (SQLException ex) {
+    } catch (SQLException ex) {
       throw new RuntimeException(ex);
     }
 
     try {
       // This code will be replaced after integration of creating POJOs on the fly utility.
       objectClass = Class.forName(outputClass);
-    }
-    catch (ClassNotFoundException ex) {
+    } catch (ClassNotFoundException ex) {
       throw new RuntimeException(ex);
     }
 
@@ -324,11 +325,9 @@
     try {
       // This code will be replaced after integration of creating POJOs on the fly utility.
       obj = objectClass.newInstance();
-    }
-    catch (InstantiationException ex) {
+    } catch (InstantiationException ex) {
       throw new RuntimeException(ex);
-    }
-    catch (IllegalAccessException ex) {
+    } catch (IllegalAccessException ex) {
       throw new RuntimeException(ex);
     }
     final int size = columns.size();
@@ -339,35 +338,25 @@
         Class<?> classType = columnNameToClassMapping.get(columnName);
         if (classType == String.class) {
           ((Setter<Object, String>)setters.get(i)).set(obj, result.getString(columnName));
-        }
-        else if (classType == int.class) {
+        } else if (classType == int.class) {
           ((SetterInt)setters.get(i)).set(obj, result.getInt(columnName));
-        }
-        else if (classType == Boolean.class) {
+        } else if (classType == Boolean.class) {
           ((SetterBoolean)setters.get(i)).set(obj, result.getBoolean(columnName));
-        }
-        else if (classType == Short.class) {
+        } else if (classType == Short.class) {
           ((SetterShort)setters.get(i)).set(obj, result.getShort(columnName));
-        }
-        else if (classType == Long.class) {
+        } else if (classType == Long.class) {
           ((SetterLong)setters.get(i)).set(obj, result.getLong(columnName));
-        }
-        else if (classType == Float.class) {
+        } else if (classType == Float.class) {
           ((SetterFloat)setters.get(i)).set(obj, result.getFloat(columnName));
-        }
-        else if (classType == Double.class) {
+        } else if (classType == Double.class) {
           ((SetterDouble)setters.get(i)).set(obj, result.getDouble(columnName));
-        }
-        else if (classType == BigDecimal.class) {
+        } else if (classType == BigDecimal.class) {
           ((Setter<Object, BigDecimal>)setters.get(i)).set(obj, result.getBigDecimal(columnName));
-        }
-        else if (classType == Date.class) {
+        } else if (classType == Date.class) {
           ((Setter<Object, Date>)setters.get(i)).set(obj, result.getDate(columnName));
-        }
-        else if (classType == Timestamp.class) {
+        } else if (classType == Timestamp.class) {
           ((Setter<Object, Timestamp>)setters.get(i)).set(obj, result.getTimestamp(columnName));
-        }
-        else {
+        } else {
           throw new RuntimeException("unsupported data type ");
         }
       }
@@ -375,28 +364,20 @@
       if (result.isLast()) {
         logger.debug("last row is {}", lastRowKey);
         if (primaryKeyColumnType == int.class) {
-
           lastRowKey = result.getInt(primaryKeyColumn);
-        }
-        else if (primaryKeyColumnType == Long.class) {
-
+        } else if (primaryKeyColumnType == Long.class) {
           lastRowKey = result.getLong(primaryKeyColumn);
-        }
-        else if (primaryKeyColumnType == Float.class) {
+        } else if (primaryKeyColumnType == Float.class) {
           lastRowKey = result.getFloat(primaryKeyColumn);
-        }
-        else if (primaryKeyColumnType == Double.class) {
+        } else if (primaryKeyColumnType == Double.class) {
           lastRowKey = result.getDouble(primaryKeyColumn);
-        }
-        else if (primaryKeyColumnType == Short.class) {
+        } else if (primaryKeyColumnType == Short.class) {
           lastRowKey = result.getShort(primaryKeyColumn);
-        }
-        else {
+        } else {
           throw new RuntimeException("unsupported data type ");
         }
       }
-    }
-    catch (SQLException ex) {
+    } catch (SQLException ex) {
       throw new RuntimeException(ex);
     }
     return obj;
@@ -413,8 +394,7 @@
     String parameterizedQuery;
     if (query.contains("%s")) {
       parameterizedQuery = query.replace("%s", startRow + "");
-    }
-    else {
+    } else {
       parameterizedQuery = query;
     }
     return parameterizedQuery;
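Besides the package move, the MemsqlPOJOInputOperator hunk above applies two checkstyle rules that recur throughout this diff: the java.sql.* wildcard import is replaced with explicit imports, and else/catch keywords are cuddled onto the closing brace. A small self-contained sketch of both rules (the class and its logic are hypothetical, not part of the PR):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;

    public class CheckstyleSketch
    {
      public static Connection open(String url)
      {
        try {
          return DriverManager.getConnection(url);
        } catch (SQLException ex) {
          // "} catch (...) {" sits on one line, replacing the old two-line layout.
          throw new RuntimeException(ex);
        }
      }

      public static void main(String[] args)
      {
        String url;
        if (args.length > 0) {
          url = args[0];
        } else {
          // "} else {" on one line, as rewritten across the hunks above.
          url = "jdbc:mysql://localhost:3306/test";
        }
        System.out.println(open(url));
      }
    }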
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/MemsqlPOJOOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/MemsqlPOJOOutputOperator.java
index b933b3b..8859051 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/MemsqlPOJOOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/MemsqlPOJOOutputOperator.java
@@ -16,27 +16,32 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.memsql;
+package org.apache.apex.malhar.contrib.memsql;
 
-import com.datatorrent.api.Context.OperatorContext;
-import com.datatorrent.lib.util.PojoUtils;
-import com.datatorrent.lib.util.PojoUtils.GetterBoolean;
-import com.datatorrent.lib.util.PojoUtils.GetterChar;
-import com.datatorrent.lib.util.PojoUtils.GetterDouble;
-import com.datatorrent.lib.util.PojoUtils.GetterFloat;
-import com.datatorrent.lib.util.PojoUtils.GetterInt;
-import com.datatorrent.lib.util.PojoUtils.GetterLong;
-import com.datatorrent.lib.util.PojoUtils.Getter;
-import com.datatorrent.lib.util.PojoUtils.GetterShort;
-
-import java.sql.*;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.sql.Types;
 import java.util.ArrayList;
 
 import javax.validation.constraints.NotNull;
 
-import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.apache.apex.malhar.lib.util.PojoUtils;
+import org.apache.apex.malhar.lib.util.PojoUtils.Getter;
+import org.apache.apex.malhar.lib.util.PojoUtils.GetterBoolean;
+import org.apache.apex.malhar.lib.util.PojoUtils.GetterChar;
+import org.apache.apex.malhar.lib.util.PojoUtils.GetterDouble;
+import org.apache.apex.malhar.lib.util.PojoUtils.GetterFloat;
+import org.apache.apex.malhar.lib.util.PojoUtils.GetterInt;
+import org.apache.apex.malhar.lib.util.PojoUtils.GetterLong;
+import org.apache.apex.malhar.lib.util.PojoUtils.GetterShort;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import com.datatorrent.api.Context.OperatorContext;
 
 /**
  * A generic implementation of AbstractMemsqlOutputOperator which can take in a POJO.
@@ -146,7 +151,6 @@
     try {
       Statement st = conn.createStatement();
       ResultSet rs = st.executeQuery("select * from " + tablename);
-
       ResultSetMetaData rsMetaData = rs.getMetaData();
 
       int numberOfColumns;
@@ -161,8 +165,7 @@
         columnDataTypes.add(type);
         LOG.debug("sql column type is " + type);
       }
-    }
-    catch (SQLException ex) {
+    } catch (SQLException ex) {
       throw new RuntimeException(ex);
     }
 
@@ -252,29 +255,29 @@
       switch (type) {
         case (Types.CHAR):
           // TODO: verify that memsql driver handles char as int
-          statement.setInt(i+1, ((GetterChar<Object>) getters.get(i)).get(tuple));
+          statement.setInt(i + 1, ((GetterChar<Object>)getters.get(i)).get(tuple));
           break;
         case (Types.VARCHAR):
-          statement.setString(i+1, ((Getter<Object, String>) getters.get(i)).get(tuple));
+          statement.setString(i + 1, ((Getter<Object, String>)getters.get(i)).get(tuple));
           break;
         case (Types.BOOLEAN):
         case (Types.TINYINT):
-          statement.setBoolean(i+1, ((GetterBoolean<Object>) getters.get(i)).get(tuple));
+          statement.setBoolean(i + 1, ((GetterBoolean<Object>)getters.get(i)).get(tuple));
           break;
         case (Types.SMALLINT):
-          statement.setShort(i+1, ((GetterShort<Object>) getters.get(i)).get(tuple));
+          statement.setShort(i + 1, ((GetterShort<Object>)getters.get(i)).get(tuple));
           break;
         case (Types.INTEGER):
-          statement.setInt(i+1, ((GetterInt<Object>) getters.get(i)).get(tuple));
+          statement.setInt(i + 1, ((GetterInt<Object>)getters.get(i)).get(tuple));
           break;
         case (Types.BIGINT):
-          statement.setLong (i+1, ((GetterLong<Object>) getters.get(i)).get(tuple));
+          statement.setLong(i + 1, ((GetterLong<Object>)getters.get(i)).get(tuple));
           break;
         case (Types.FLOAT):
-          statement.setFloat(i+1, ((GetterFloat<Object>) getters.get(i)).get(tuple));
+          statement.setFloat(i + 1, ((GetterFloat<Object>)getters.get(i)).get(tuple));
           break;
         case (Types.DOUBLE):
-          statement.setDouble(i+1, ((GetterDouble<Object>) getters.get(i)).get(tuple));
+          statement.setDouble(i + 1, ((GetterDouble<Object>)getters.get(i)).get(tuple));
           break;
         default:
           /*
@@ -284,12 +287,12 @@
             Types.ARRAY
             Types.OTHER
            */
-          statement.setObject(i+1, ((Getter<Object, Object>)getters.get(i)).get(tuple));
+          statement.setObject(i + 1, ((Getter<Object, Object>)getters.get(i)).get(tuple));
           break;
       }
     }
   }
 
-  private static transient final Logger LOG = LoggerFactory.getLogger(MemsqlPOJOOutputOperator.class);
+  private static final Logger LOG = LoggerFactory.getLogger(MemsqlPOJOOutputOperator.class);
 
 }
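Two smaller cleanups in the MemsqlPOJOOutputOperator hunk are easy to miss: binary operators gain surrounding spaces (i + 1), casts lose the space before their operand, and the logger drops a redundant modifier. The transient keyword only affects instance fields during Java serialization; static fields are never serialized, so "static transient final" says nothing that "static final" does not, and the latter also follows JLS modifier order. A sketch of the distinction, with hypothetical field names:

    import java.io.Serializable;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggerModifierSketch implements Serializable
    {
      // Never serialized anyway, so "transient" here would be redundant.
      private static final Logger LOG = LoggerFactory.getLogger(LoggerModifierSketch.class);

      private int processedCount;        // instance state: serialized
      private transient Object scratch;  // instance state: explicitly excluded

      void touch(int i)
      {
        LOG.debug("slot {}", i + 1);     // spaces around binary operators, as above
      }
    }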
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/MemsqlStore.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/MemsqlStore.java
index 2fdef93..e0d93bb 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/MemsqlStore.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/MemsqlStore.java
@@ -16,9 +16,9 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.memsql;
+package org.apache.apex.malhar.contrib.memsql;
 
-import com.datatorrent.lib.db.jdbc.JdbcNonTransactionalStore;
+import org.apache.apex.malhar.lib.db.jdbc.JdbcNonTransactionalStore;
 
 /**
  * A connection store for memsql which has the default connection driver set
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/package-info.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/package-info.java
index a23b118..59f1aa4 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/package-info.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/memsql/package-info.java
@@ -17,4 +17,4 @@
  * under the License.
  */
 @org.apache.hadoop.classification.InterfaceStability.Evolving
-package com.datatorrent.contrib.memsql;
+package org.apache.apex.malhar.contrib.memsql;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/AbstractStreamPatternMatcher.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/AbstractStreamPatternMatcher.java
index 38b176c..81bfc26 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/AbstractStreamPatternMatcher.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/AbstractStreamPatternMatcher.java
@@ -31,7 +31,7 @@
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.annotation.OperatorAnnotation;
 import com.datatorrent.common.util.BaseOperator;
-@Deprecated
+
 /**
  * <p>
  * This operator searches for a given pattern in the input stream.<br>
@@ -56,7 +56,7 @@
  * @since 2.0.0
  * @deprecated
  */
-
+@Deprecated
 @OperatorAnnotation(partitionable = false)
 public abstract class AbstractStreamPatternMatcher<T> extends BaseOperator
 {
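The AbstractStreamPatternMatcher hunk relocates @Deprecated rather than deleting it. Doc comments are conventionally written before every annotation and modifier of a declaration; as I understand the javadoc tooling, it only reliably associates a comment that immediately precedes the declaration, so the annotation-above-javadoc layout the hunk removes is at best fragile. The corrected ordering, sketched on a hypothetical class:

    /**
     * Doc comment first, so the javadoc tool attaches it to the class.
     *
     * @deprecated retained only to illustrate the ordering fix
     */
    @Deprecated
    public abstract class AnnotationOrderSketch
    {
    }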
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/AllAfterMatchMap.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/AllAfterMatchMap.java
index 44118d5..16429e1 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/AllAfterMatchMap.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/AllAfterMatchMap.java
@@ -21,12 +21,12 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.apex.malhar.lib.util.BaseMatchOperator;
+
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OperatorAnnotation;
 
-import com.datatorrent.lib.util.BaseMatchOperator;
-
 /**
  * This operator takes Maps, whose values are numbers, as input tuples.&nbsp;
  * It then performs a numeric comparison on the values corresponding to one of the keys in the input tuple maps.&nbsp;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/DistinctMap.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/DistinctMap.java
index 426c2e5..6dca102 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/DistinctMap.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/DistinctMap.java
@@ -21,13 +21,13 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.apex.malhar.lib.util.BaseKeyValueOperator;
+import org.apache.apex.malhar.lib.util.UnifierHashMap;
+
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OperatorAnnotation;
 
-import com.datatorrent.lib.util.BaseKeyValueOperator;
-import com.datatorrent.lib.util.UnifierHashMap;
-
 /**
  * This operator computes and emits distinct key,val pairs (i.e. drops duplicates).
  * <p>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FilterKeyVals.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FilterKeyVals.java
index 9925d69..32d5039 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FilterKeyVals.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FilterKeyVals.java
@@ -23,13 +23,13 @@
 
 import javax.validation.constraints.NotNull;
 
+import org.apache.apex.malhar.lib.util.BaseKeyOperator;
+
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OperatorAnnotation;
 import com.datatorrent.api.annotation.Stateless;
 
-import com.datatorrent.lib.util.BaseKeyOperator;
-
 /**
  * This operator filters the incoming stream of tuples using a set of specified key value pairs.&nbsp;
  * Tuples that match the filter are emitted by the operator.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FilterKeysHashMap.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FilterKeysHashMap.java
index cfee74c..0c5b753 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FilterKeysHashMap.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FilterKeysHashMap.java
@@ -23,13 +23,13 @@
 
 import javax.validation.constraints.NotNull;
 
+import org.apache.apex.malhar.lib.util.BaseKeyOperator;
+
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OperatorAnnotation;
 import com.datatorrent.api.annotation.Stateless;
 
-import com.datatorrent.lib.util.BaseKeyOperator;
-
 
 /**
  * This operator filters the incoming stream of key value pairs based on the keys specified by property "keys".
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FilterKeysMap.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FilterKeysMap.java
index 43386f0..328d7d0 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FilterKeysMap.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FilterKeysMap.java
@@ -23,14 +23,14 @@
 
 import javax.validation.constraints.NotNull;
 
+import org.apache.apex.malhar.lib.util.BaseKeyOperator;
+import org.apache.apex.malhar.lib.util.UnifierHashMap;
+
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OperatorAnnotation;
 import com.datatorrent.api.annotation.Stateless;
 
-import com.datatorrent.lib.util.BaseKeyOperator;
-import com.datatorrent.lib.util.UnifierHashMap;
-
 /**
  * This operator filters the incoming stream of key value pairs based on the keys specified by property "keys".
  * <p>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FirstMatchMap.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FirstMatchMap.java
index 7649706..6ea2d91 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FirstMatchMap.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FirstMatchMap.java
@@ -21,12 +21,12 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.apex.malhar.lib.util.BaseMatchOperator;
+
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OperatorAnnotation;
 
-import com.datatorrent.lib.util.BaseMatchOperator;
-
 /**
  * This operator filters the incoming stream of key value pairs by obtaining the values corresponding to a specified key,
  * and comparing those values to a specified number.&nbsp;The first key value pair, in each window, to satisfy the comparison is emitted.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FirstN.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FirstN.java
index f067fbb..0638fbf 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FirstN.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FirstN.java
@@ -21,13 +21,12 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.apex.malhar.lib.util.AbstractBaseNOperatorMap;
 import org.apache.commons.lang.mutable.MutableInt;
 
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OperatorAnnotation;
 
-import com.datatorrent.lib.util.AbstractBaseNOperatorMap;
-
 /**
  * This operator filters the incoming stream of key value pairs by emitting the first N key value pairs with a specified key in each window.
  * <p>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FirstTillMatch.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FirstTillMatch.java
index c32a0f2..f7081f0 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FirstTillMatch.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/FirstTillMatch.java
@@ -20,12 +20,12 @@
 
 import java.util.HashMap;
 
+import org.apache.apex.malhar.lib.util.BaseMatchOperator;
+
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OperatorAnnotation;
 
-import com.datatorrent.lib.util.BaseMatchOperator;
-
 /**
  * This operator filters the incoming stream of key value pairs by obtaining the values corresponding to a specified key,
  * and comparing those values to a specified number.&nbsp;For each window, all key value pairs are emitted by the operator until a value satisfying the comparison is encountered.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/InsertSortDesc.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/InsertSortDesc.java
index 4af9091..53ee21d 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/InsertSortDesc.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/InsertSortDesc.java
@@ -22,13 +22,14 @@
 import java.util.HashMap;
 import java.util.PriorityQueue;
 
+import org.apache.apex.malhar.lib.util.AbstractBaseSortOperator;
+import org.apache.apex.malhar.lib.util.ReversibleComparator;
+
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.InputPortFieldAnnotation;
 import com.datatorrent.api.annotation.OperatorAnnotation;
 import com.datatorrent.api.annotation.OutputPortFieldAnnotation;
-import com.datatorrent.lib.util.AbstractBaseSortOperator;
-import com.datatorrent.lib.util.ReversibleComparator;
 
 /**
  * This operator takes the values it receives each window and outputs them in descending order at the end of each window.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/InvertIndex.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/InvertIndex.java
index 7964ed7..3ba81d6 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/InvertIndex.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/InvertIndex.java
@@ -22,13 +22,13 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.apex.malhar.lib.util.BaseKeyValueOperator;
+
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.Operator.Unifier;
 import com.datatorrent.api.annotation.OperatorAnnotation;
 
-import com.datatorrent.lib.util.BaseKeyValueOperator;
-
 /**
  * This operator takes a stream of key value pairs each window,
  * and outputs a set of inverted key value pairs at the end of each window.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/InvertIndexArray.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/InvertIndexArray.java
index 26b77ac..556a62d 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/InvertIndexArray.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/InvertIndexArray.java
@@ -22,12 +22,12 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.apex.malhar.lib.util.BaseKeyValueOperator;
+
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OperatorAnnotation;
 
-import com.datatorrent.lib.util.BaseKeyValueOperator;
-
 /**
  * This operator takes a stream of key value pairs each window,
  * and outputs a set of inverted key value pairs at the end of each window.&nbsp;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/LastMatchMap.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/LastMatchMap.java
index 188c9b1..d4abdb9 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/LastMatchMap.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/LastMatchMap.java
@@ -21,12 +21,12 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.apex.malhar.lib.util.BaseMatchOperator;
+
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OperatorAnnotation;
 
-import com.datatorrent.lib.util.BaseMatchOperator;
-
 /**
  * This operator filters the incoming stream of key value pairs by obtaining the values corresponding to a specified key,
  * and comparing those values to a specified value.&nbsp;The last key value pair, in each window, to satisfy the comparison is emitted.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/LeastFrequentKeyMap.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/LeastFrequentKeyMap.java
index af5229c..3c0f79c 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/LeastFrequentKeyMap.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/LeastFrequentKeyMap.java
@@ -22,15 +22,15 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.apex.malhar.lib.util.AbstractBaseFrequentKey;
+import org.apache.apex.malhar.lib.util.UnifierArrayHashMapFrequent;
+import org.apache.apex.malhar.lib.util.UnifierHashMapFrequent;
+
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OperatorAnnotation;
 import com.datatorrent.api.annotation.OutputPortFieldAnnotation;
 
-import com.datatorrent.lib.util.AbstractBaseFrequentKey;
-import com.datatorrent.lib.util.UnifierArrayHashMapFrequent;
-import com.datatorrent.lib.util.UnifierHashMapFrequent;
-
 /**
  * This operator filters the incoming stream of key value pairs by finding the key or keys (if there is a tie) that occur the fewest number of times within each window.&nbsp;
  * A list of the corresponding key value pairs is then output to the port named "list" and one of the corresponding key value pairs is output to the port "least", at the end of each window.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/LeastFrequentKeyValueMap.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/LeastFrequentKeyValueMap.java
index 78eb6d9..c3c61c8 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/LeastFrequentKeyValueMap.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/LeastFrequentKeyValueMap.java
@@ -20,9 +20,9 @@
 
 import java.util.HashMap;
 
-import com.datatorrent.api.DefaultOutputPort;
+import org.apache.apex.malhar.lib.util.AbstractBaseFrequentKeyValueMap;
 
-import com.datatorrent.lib.util.AbstractBaseFrequentKeyValueMap;
+import com.datatorrent.api.DefaultOutputPort;
 
 /**
  * This operator filters the incoming stream of key value pairs by finding the value or values (if there is a tie),
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/MostFrequentKeyMap.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/MostFrequentKeyMap.java
index f1ab968..cdf0d81 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/MostFrequentKeyMap.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/MostFrequentKeyMap.java
@@ -22,14 +22,14 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.apex.malhar.lib.util.AbstractBaseFrequentKey;
+import org.apache.apex.malhar.lib.util.UnifierArrayHashMapFrequent;
+import org.apache.apex.malhar.lib.util.UnifierHashMapFrequent;
+
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OperatorAnnotation;
 
-import com.datatorrent.lib.util.AbstractBaseFrequentKey;
-import com.datatorrent.lib.util.UnifierArrayHashMapFrequent;
-import com.datatorrent.lib.util.UnifierHashMapFrequent;
-
 /**
  * This operator filters the incoming stream of key value pairs by finding the key or keys (if there is a tie)
  * that occur the largest number of times within each window.&nbsp;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/MostFrequentKeyValueMap.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/MostFrequentKeyValueMap.java
index 4fb6472..0545ac5 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/MostFrequentKeyValueMap.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/MostFrequentKeyValueMap.java
@@ -20,11 +20,11 @@
 
 import java.util.HashMap;
 
+import org.apache.apex.malhar.lib.util.AbstractBaseFrequentKeyValueMap;
+
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OperatorAnnotation;
 
-import com.datatorrent.lib.util.AbstractBaseFrequentKeyValueMap;
-
 /**
  * This operator filters the incoming stream of key value pairs by finding the value or values (if there is a tie),
  * for each key, that occur the largest number of times within each window.&nbsp;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/Sampler.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/Sampler.java
index 7caf523..fb3a6cb 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/Sampler.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/Sampler.java
@@ -23,13 +23,13 @@
 import javax.validation.constraints.Max;
 import javax.validation.constraints.Min;
 
+import org.apache.apex.malhar.lib.util.BaseKeyOperator;
+
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OperatorAnnotation;
 import com.datatorrent.api.annotation.Stateless;
 
-import com.datatorrent.lib.util.BaseKeyOperator;
-
 /**
  * This operator takes a stream of tuples as input, and emits each tuple with a specified probability.
  * <p>
diff --git a/samples/src/main/java/com/datatorrent/samples/package-info.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/package-info.java
similarity index 82%
rename from samples/src/main/java/com/datatorrent/samples/package-info.java
rename to contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/package-info.java
index d1f36fd..55d7f54 100644
--- a/samples/src/main/java/com/datatorrent/samples/package-info.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/algo/package-info.java
@@ -16,10 +16,5 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-/**
- * <b>com.datatorrent.lib.samplecode</b> is a collection of libraries that have examples of various operators<p>
- * <br>
- * <br>
- */
-
-package com.datatorrent.samples;
+@org.apache.hadoop.classification.InterfaceStability.Evolving
+package org.apache.apex.malhar.contrib.misc.algo;
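This file is git's rename detection at work: the old samples package-info.java (82% similar) is reused as the carrier for the new package declaration, and the same pattern repeats for the misc.math package below. package-info.java matters here because Java only accepts package-level annotations in that one file, which is why the stability marker is declared this way instead of on each class. A minimal sketch, with a hypothetical package name:

    // Contents of .../org/apache/apex/malhar/contrib/example/package-info.java;
    // a package annotation is only legal in package-info.java.
    @org.apache.hadoop.classification.InterfaceStability.Evolving
    package org.apache.apex.malhar.contrib.example;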
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/Change.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/Change.java
index 51d90ab..01d433b 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/Change.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/Change.java
@@ -18,10 +18,11 @@
  */
 package org.apache.apex.malhar.contrib.misc.math;
 
+import org.apache.apex.malhar.lib.util.BaseNumberValueOperator;
+
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OutputPortFieldAnnotation;
-import com.datatorrent.lib.util.BaseNumberValueOperator;
 
 /**
  * Operator compares data values arriving on the data input port with the base value arriving on the base input port.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/ChangeAlert.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/ChangeAlert.java
index c554ead..d4fba9c 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/ChangeAlert.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/ChangeAlert.java
@@ -20,10 +20,11 @@
 
 import javax.validation.constraints.Min;
 
+import org.apache.apex.malhar.lib.util.BaseNumberValueOperator;
+import org.apache.apex.malhar.lib.util.KeyValPair;
+
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
-import com.datatorrent.lib.util.BaseNumberValueOperator;
-import com.datatorrent.lib.util.KeyValPair;
 
 /**
  * Compares consecutive input data values, emits &lt;value,percent change value&gt; pair on alert output port, if percent change exceeds a certain threshold value.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/ChangeAlertKeyVal.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/ChangeAlertKeyVal.java
index 8d75ab4..61678fc 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/ChangeAlertKeyVal.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/ChangeAlertKeyVal.java
@@ -22,12 +22,12 @@
 
 import javax.validation.constraints.Min;
 
+import org.apache.apex.malhar.lib.util.BaseNumberKeyValueOperator;
+import org.apache.apex.malhar.lib.util.KeyValPair;
 import org.apache.commons.lang.mutable.MutableDouble;
 
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
-import com.datatorrent.lib.util.BaseNumberKeyValueOperator;
-import com.datatorrent.lib.util.KeyValPair;
 
 /**
  * Operator compares consecutive values arriving at input port mapped by keys, emits &lt;key,percent change&gt; pair on output alert port if percent change exceeds percentage threshold set in operator.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/ChangeAlertMap.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/ChangeAlertMap.java
index e8add80..a062cb3 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/ChangeAlertMap.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/ChangeAlertMap.java
@@ -23,11 +23,11 @@
 
 import javax.validation.constraints.Min;
 
+import org.apache.apex.malhar.lib.util.BaseNumberKeyValueOperator;
 import org.apache.commons.lang.mutable.MutableDouble;
 
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
-import com.datatorrent.lib.util.BaseNumberKeyValueOperator;
 
 /**
  * Operator stores &lt;key,value&gt; pair in hash map across the windows for comparison and emits hash map of &lt;key,percent change in value for each key&gt; if percent change
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/ChangeKeyVal.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/ChangeKeyVal.java
index 0600e6a..ac64a3b 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/ChangeKeyVal.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/ChangeKeyVal.java
@@ -20,15 +20,14 @@
 
 import java.util.HashMap;
 
+import org.apache.apex.malhar.lib.util.BaseNumberKeyValueOperator;
+import org.apache.apex.malhar.lib.util.KeyValPair;
 import org.apache.commons.lang.mutable.MutableDouble;
 
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OutputPortFieldAnnotation;
 
-import com.datatorrent.lib.util.BaseNumberKeyValueOperator;
-import com.datatorrent.lib.util.KeyValPair;
-
 /**
  * Operator compares &lt;key,value&gt; pairs arriving at data and base input ports and stores &lt;key,value&gt; pairs arriving at base port in hash map across the windows.
  * <p/>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/CompareExceptMap.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/CompareExceptMap.java
index bfa3c0a..7a2f325 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/CompareExceptMap.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/CompareExceptMap.java
@@ -21,11 +21,12 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.apex.malhar.lib.algo.MatchMap;
+import org.apache.apex.malhar.lib.util.UnifierHashMap;
+
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OutputPortFieldAnnotation;
 import com.datatorrent.api.annotation.Stateless;
-import com.datatorrent.lib.algo.MatchMap;
-import com.datatorrent.lib.util.UnifierHashMap;
 
 /**
  * Operator compares tuples based on the properties "key", "value", and "compare".
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/CompareMap.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/CompareMap.java
index e263d3f..41ab9da 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/CompareMap.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/CompareMap.java
@@ -20,9 +20,10 @@
 
 import java.util.HashMap;
 
+import org.apache.apex.malhar.lib.algo.MatchMap;
+
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.Stateless;
-import com.datatorrent.lib.algo.MatchMap;
 
 /**
  * This operator compares tuples subclassed from Number based on the property "key", "value", and "cmp", and matching tuples are emitted.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/CountKeyVal.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/CountKeyVal.java
index a229796..93a102b 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/CountKeyVal.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/CountKeyVal.java
@@ -21,15 +21,15 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.apex.malhar.lib.util.BaseKeyValueOperator;
+import org.apache.apex.malhar.lib.util.KeyValPair;
+import org.apache.apex.malhar.lib.util.UnifierCountOccurKey;
 import org.apache.commons.lang.mutable.MutableInt;
 
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.StreamCodec;
 import com.datatorrent.api.annotation.OutputPortFieldAnnotation;
-import com.datatorrent.lib.util.BaseKeyValueOperator;
-import com.datatorrent.lib.util.KeyValPair;
-import com.datatorrent.lib.util.UnifierCountOccurKey;
 
 /**
  * This operator aggregates occurrences of keys in &lt;key,value&gt; pairs at the input port. A &lt;Key,Occurrence count&gt; pair is emitted for each input on the output port.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/ExceptMap.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/ExceptMap.java
index 2dcb583..a4d11ed 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/ExceptMap.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/ExceptMap.java
@@ -21,10 +21,11 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.apex.malhar.lib.algo.MatchMap;
+import org.apache.apex.malhar.lib.util.UnifierHashMap;
+
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.Stateless;
-import com.datatorrent.lib.algo.MatchMap;
-import com.datatorrent.lib.util.UnifierHashMap;
 
 /**
  * This operator compares tuples subclassed from Number based on the properties "key", "value", and "cmp", and emits tuples that do not match.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/Quotient.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/Quotient.java
index 8909acd..9ba739c 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/Quotient.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/Quotient.java
@@ -18,10 +18,11 @@
  */
 package org.apache.apex.malhar.contrib.misc.math;
 
+import org.apache.apex.malhar.lib.util.BaseNumberValueOperator;
+
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OperatorAnnotation;
-import com.datatorrent.lib.util.BaseNumberValueOperator;
 
 /**
  * This operator adds all the values on "numerator" and "denominator" and emits quotient at end of window.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/QuotientMap.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/QuotientMap.java
index b37bbd5..af19d64 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/QuotientMap.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/QuotientMap.java
@@ -23,12 +23,12 @@
 
 import javax.validation.constraints.Min;
 
+import org.apache.apex.malhar.lib.util.BaseNumberKeyValueOperator;
 import org.apache.commons.lang.mutable.MutableDouble;
 
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OperatorAnnotation;
-import com.datatorrent.lib.util.BaseNumberKeyValueOperator;
 
 /**
  * Adds all the values for each key on "numerator" and "denominator" and emits the quotient at the end of the window for all keys in the denominator.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/SumCountMap.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/SumCountMap.java
index b2493a1..2b7c245 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/SumCountMap.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/SumCountMap.java
@@ -21,15 +21,15 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.apex.malhar.lib.util.BaseNumberKeyValueOperator;
+import org.apache.apex.malhar.lib.util.UnifierHashMapInteger;
+import org.apache.apex.malhar.lib.util.UnifierHashMapSumKeys;
 import org.apache.commons.lang.mutable.MutableDouble;
 import org.apache.commons.lang.mutable.MutableInt;
 
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OutputPortFieldAnnotation;
-import com.datatorrent.lib.util.BaseNumberKeyValueOperator;
-import com.datatorrent.lib.util.UnifierHashMapInteger;
-import com.datatorrent.lib.util.UnifierHashMapSumKeys;
 
 /**
  * Emits the sum and count of values for each key at the end of window.
diff --git a/samples/src/main/java/com/datatorrent/samples/package-info.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/package-info.java
similarity index 82%
copy from samples/src/main/java/com/datatorrent/samples/package-info.java
copy to contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/package-info.java
index d1f36fd..53edcd1 100644
--- a/samples/src/main/java/com/datatorrent/samples/package-info.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/math/package-info.java
@@ -16,10 +16,5 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-/**
- * <b>com.datatorrent.lib.samplecode</b> is a collection of libraries that have examples of various operators<p>
- * <br>
- * <br>
- */
-
-package com.datatorrent.samples;
+@org.apache.hadoop.classification.InterfaceStability.Evolving
+package org.apache.apex.malhar.contrib.misc.math;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/DeleteOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/DeleteOperator.java
index 7faf96d..3b95ef6 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/DeleteOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/DeleteOperator.java
@@ -20,10 +20,11 @@
 
 import java.util.Map;
 
+import org.apache.apex.malhar.lib.streamquery.condition.Condition;
+
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.common.util.BaseOperator;
-import com.datatorrent.lib.streamquery.condition.Condition;
 
 /**
  * An implementation of BaseOperator that provides sql delete query semantics on live data stream. <br>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/GroupByHavingOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/GroupByHavingOperator.java
index 9b2eeda..515b4f9 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/GroupByHavingOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/GroupByHavingOperator.java
@@ -26,16 +26,17 @@
 
 import org.apache.apex.malhar.contrib.misc.streamquery.condition.HavingCondition;
 import org.apache.apex.malhar.contrib.misc.streamquery.function.FunctionIndex;
+import org.apache.apex.malhar.lib.streamquery.condition.Condition;
+import org.apache.apex.malhar.lib.streamquery.index.ColumnIndex;
 
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.common.util.BaseOperator;
-import com.datatorrent.lib.streamquery.index.ColumnIndex;
-import com.datatorrent.lib.streamquery.condition.Condition;
-@Deprecated
+
 /**
  * @since 3.6.0
  */
+@Deprecated
 public class GroupByHavingOperator extends BaseOperator
 {
 
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/InnerJoinOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/InnerJoinOperator.java
index d3e11c3..a3b3c91 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/InnerJoinOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/InnerJoinOperator.java
@@ -22,13 +22,14 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.apex.malhar.lib.streamquery.condition.Condition;
+import org.apache.apex.malhar.lib.streamquery.index.Index;
+
 import com.datatorrent.api.Context.OperatorContext;
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.Operator;
 import com.datatorrent.api.annotation.OperatorAnnotation;
-import com.datatorrent.lib.streamquery.condition.Condition;
-import com.datatorrent.lib.streamquery.index.Index;
 
 /**
  * An implementation of Operator that reads table row data from two table data input ports. <br>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/SelectOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/SelectOperator.java
index 08799cb..34a157a 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/SelectOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/SelectOperator.java
@@ -22,11 +22,12 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.apex.malhar.lib.streamquery.condition.Condition;
+import org.apache.apex.malhar.lib.streamquery.index.Index;
+
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.common.util.BaseOperator;
-import com.datatorrent.lib.streamquery.condition.Condition;
-import com.datatorrent.lib.streamquery.index.Index;
 
 /**
  * An implementation of BaseOperator that provides sql select query semantics on live data stream. <br>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/UpdateOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/UpdateOperator.java
index 52ddac6..1548894 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/UpdateOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/UpdateOperator.java
@@ -21,10 +21,11 @@
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.apex.malhar.lib.streamquery.condition.Condition;
+
 import com.datatorrent.api.DefaultInputPort;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.common.util.BaseOperator;
-import com.datatorrent.lib.streamquery.condition.Condition;
 
 /**
  * An implementation of BaseOperator that provides sql update query semantics on live data stream. <br>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/condition/BetweenCondition.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/condition/BetweenCondition.java
index 155470c..e25000b 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/condition/BetweenCondition.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/condition/BetweenCondition.java
@@ -22,7 +22,7 @@
 
 import javax.validation.constraints.NotNull;
 
-import com.datatorrent.lib.streamquery.condition.Condition;
+import org.apache.apex.malhar.lib.streamquery.condition.Condition;
 
 /**
  * A derivation of Condition that validates a row by checking if the given column name value lies between the given left and right range. <br>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/condition/CompoundCondition.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/condition/CompoundCondition.java
index d606991..99235e3 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/condition/CompoundCondition.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/condition/CompoundCondition.java
@@ -22,7 +22,7 @@
 
 import javax.validation.constraints.NotNull;
 
-import com.datatorrent.lib.streamquery.condition.Condition;
+import org.apache.apex.malhar.lib.streamquery.condition.Condition;
 
 /**
  * A derivation of Condition index that implements logical AND/OR select expression. <br>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/condition/EqualValueCondition.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/condition/EqualValueCondition.java
index a54960d..0042c81 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/condition/EqualValueCondition.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/condition/EqualValueCondition.java
@@ -21,7 +21,7 @@
 import java.util.HashMap;
 import java.util.Map;
 
-import com.datatorrent.lib.streamquery.condition.Condition;
+import org.apache.apex.malhar.lib.streamquery.condition.Condition;
 
 /**
  * An implementation of condition on column equality.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/condition/InCondition.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/condition/InCondition.java
index d19bb99..e2b14f1 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/condition/InCondition.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/condition/InCondition.java
@@ -24,7 +24,7 @@
 
 import javax.validation.constraints.NotNull;
 
-import com.datatorrent.lib.streamquery.condition.Condition;
+import org.apache.apex.malhar.lib.streamquery.condition.Condition;
 
 /**
  * An implementation of condition class to check if a column value is in a given set of values.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/condition/LikeCondition.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/condition/LikeCondition.java
index a8789fa..6502d6a 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/condition/LikeCondition.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/condition/LikeCondition.java
@@ -24,7 +24,7 @@
 
 import javax.validation.constraints.NotNull;
 
-import com.datatorrent.lib.streamquery.condition.Condition;
+import org.apache.apex.malhar.lib.streamquery.condition.Condition;
 
 /**
  * An implementation of condition class to filter rows for which given column name value matches given regular expression. <br>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/BinaryExpression.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/BinaryExpression.java
index 4de58c1..16e2321 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/BinaryExpression.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/BinaryExpression.java
@@ -20,7 +20,7 @@
 
 import javax.validation.constraints.NotNull;
 
-import com.datatorrent.lib.streamquery.index.Index;
+import org.apache.apex.malhar.lib.streamquery.index.Index;
 
 /**
  * Abstract class to filter rows by a binary expression index.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/MidIndex.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/MidIndex.java
index c165d89..e4e7d68 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/MidIndex.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/MidIndex.java
@@ -22,7 +22,7 @@
 
 import javax.validation.constraints.NotNull;
 
-import com.datatorrent.lib.streamquery.index.ColumnIndex;
+import org.apache.apex.malhar.lib.streamquery.index.ColumnIndex;
 
 /**
  * <p>An implementation of Column Index that implements the filter method based on a mid index. </p>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/NegateExpression.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/NegateExpression.java
index 0a6f64d..9a647e7 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/NegateExpression.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/NegateExpression.java
@@ -48,7 +48,7 @@
   }
 
   /* (non-Javadoc)
-   * @see com.datatorrent.lib.streamquery.index.Index#filter(java.util.Map, java.util.Map)
+   * @see org.apache.apex.malhar.lib.streamquery.index.Index#filter(java.util.Map, java.util.Map)
    */
   @Override
   public void filter(Map<String, Object> row, Map<String, Object> collect)
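
Review note: the filter(Map, Map) signature is confirmed by the @see tag rewritten above. A minimal sketch of a custom expression against the relocated type (assuming Index is the single-method type that @see references; the class below is hypothetical):

    import java.util.Map;

    import org.apache.apex.malhar.lib.streamquery.index.Index;

    // Illustrative only: projects the upper-cased value of one column under an alias.
    public class UpperCaseColumn implements Index
    {
      private final String column;
      private final String alias;

      public UpperCaseColumn(String column, String alias)
      {
        this.column = column;
        this.alias = alias;
      }

      @Override
      public void filter(Map<String, Object> row, Map<String, Object> collect)
      {
        Object value = row.get(column);
        if (value != null) {
          collect.put(alias, value.toString().toUpperCase());
        }
      }
    }
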
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/RoundDoubleIndex.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/RoundDoubleIndex.java
index 495063f..7a2c96a 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/RoundDoubleIndex.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/RoundDoubleIndex.java
@@ -22,7 +22,7 @@
 
 import javax.validation.constraints.NotNull;
 
-import com.datatorrent.lib.streamquery.index.ColumnIndex;
+import org.apache.apex.malhar.lib.streamquery.index.ColumnIndex;
 
 /**
  * <p>An implementation of column index that implements the filter method using a round-double index. </p>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/StringCaseIndex.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/StringCaseIndex.java
index 31c9468..1b00523 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/StringCaseIndex.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/StringCaseIndex.java
@@ -22,7 +22,7 @@
 
 import javax.validation.constraints.NotNull;
 
-import com.datatorrent.lib.streamquery.index.ColumnIndex;
+import org.apache.apex.malhar.lib.streamquery.index.ColumnIndex;
 
 /**
  * <p>An implementation of Column Index that implements the filter method using the case of a string index. </p>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/StringLenIndex.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/StringLenIndex.java
index f764c9e..92e3501 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/StringLenIndex.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/StringLenIndex.java
@@ -22,7 +22,7 @@
 
 import javax.validation.constraints.NotNull;
 
-import com.datatorrent.lib.streamquery.index.ColumnIndex;
+import org.apache.apex.malhar.lib.streamquery.index.ColumnIndex;
 
 /**
  * <p>An implementation of Column Index that implements the filter method using the length of a string index. </p>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/SumExpression.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/SumExpression.java
index 91d4ec7..a03f09f 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/SumExpression.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/SumExpression.java
@@ -51,7 +51,7 @@
   }
 
   /* sum column values.
-   * @see com.datatorrent.lib.streamquery.index.Index#filter(java.util.Map, java.util.Map)
+   * @see org.apache.apex.malhar.lib.streamquery.index.Index#filter(java.util.Map, java.util.Map)
    */
   @Override
   public void filter(Map<String, Object> row, Map<String, Object> collect)
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/UnaryExpression.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/UnaryExpression.java
index 04d5fc6..97714b3 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/UnaryExpression.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/misc/streamquery/index/UnaryExpression.java
@@ -20,7 +20,7 @@
 
 import javax.validation.constraints.NotNull;
 
-import com.datatorrent.lib.streamquery.index.Index;
+import org.apache.apex.malhar.lib.streamquery.index.Index;
 
 /**
  * A base implementation of an index that filters rows by a unary expression.&nbsp; Subclasses should provide the
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBArrayListOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBArrayListOutputOperator.java
index a783159..ae95ad0 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBArrayListOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBArrayListOutputOperator.java
@@ -16,12 +16,12 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.mongodb;
+package org.apache.apex.malhar.contrib.mongodb;
 
-import com.mongodb.BasicDBObject;
 import java.util.ArrayList;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import com.mongodb.BasicDBObject;
 
 /**
  * This is a MongoDB Output Operator, which uses Lists to map tuples to appropriate tables and columns in MongoDB,
@@ -94,8 +94,7 @@
       if ((doc = tableToDocument.get(table)) == null) {
         doc = new BasicDBObject();
         doc.put(columnList.get(i), tuple.get(i));
-      }
-      else {
+      } else {
         doc.put(columnList.get(i), tuple.get(i));
       }
       tableToDocument.put(table, doc);
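
Review note: incidentally, both branches of the conditional above put the same value into the document; a possible follow-up simplification, outside the scope of this rename/checkstyle pass, would be:

    BasicDBObject doc = tableToDocument.get(table);
    if (doc == null) {
      doc = new BasicDBObject();
    }
    doc.put(columnList.get(i), tuple.get(i));
    tableToDocument.put(table, doc);
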
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBConnectable.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBConnectable.java
index 2cd2037..30a8803 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBConnectable.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBConnectable.java
@@ -16,15 +16,16 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.mongodb;
+package org.apache.apex.malhar.contrib.mongodb;
 
 import java.net.UnknownHostException;
 
 import javax.validation.constraints.NotNull;
 
-import com.mongodb.*;
+import org.apache.apex.malhar.lib.db.Connectable;
 
-import com.datatorrent.lib.db.Connectable;
+import com.mongodb.DB;
+import com.mongodb.MongoClient;
 
 /**
  * MongoDB base operator, which holds the basic connection information for an I/O operator.<p><br>
@@ -111,8 +112,7 @@
       if (userName != null && passWord != null) {
         db.authenticate(userName, passWord.toCharArray());
       }
-    }
-    catch (UnknownHostException ex) {
+    } catch (UnknownHostException ex) {
       throw new RuntimeException("creating mongodb client", ex);
     }
   }
@@ -128,8 +128,7 @@
   {
     try {
       mongoClient.getConnector().getDBPortPool(mongoClient.getAddress()).get().ensureOpen();
-    }
-    catch (Exception ex) {
+    } catch (Exception ex) {
       return false;
     }
     return true;
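
Review note: the import reshuffle in this file shows the ordering convention the patch applies throughout: java.* first, then javax.*, then org.* (including the relocated org.apache.apex.malhar types), then third-party com.*, with com.datatorrent.* last. A representative skeleton:

    import java.net.UnknownHostException;                // 1. java.*

    import javax.validation.constraints.NotNull;         // 2. javax.*

    import org.apache.apex.malhar.lib.db.Connectable;    // 3. org.*

    import com.mongodb.MongoClient;                      // 4. other com.*

    import com.datatorrent.api.Context.OperatorContext;  // 5. com.datatorrent.* last
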
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBHashMapOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBHashMapOutputOperator.java
index 9196801..57eedb4 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBHashMapOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBHashMapOutputOperator.java
@@ -16,10 +16,12 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.mongodb;
+package org.apache.apex.malhar.contrib.mongodb;
+
+import java.util.HashMap;
+import java.util.Map;
 
 import com.mongodb.BasicDBObject;
-import java.util.*;
 
 /**
  * This is a MongoDB Output Operator, which uses HashMaps to map tuples to appropriate tables and columns in MongoDB,
@@ -91,8 +93,7 @@
       if ((doc = tableToDocument.get(table)) == null) {
         doc = new BasicDBObject();
         doc.put(column, entry.getValue());
-      }
-      else {
+      } else {
         doc.put(column, entry.getValue());
       }
       tableToDocument.put(table, doc);
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBInputOperator.java
index 8f5496a..1008f6d 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBInputOperator.java
@@ -16,20 +16,20 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.mongodb;
+package org.apache.apex.malhar.contrib.mongodb;
 
-import com.datatorrent.api.Context.OperatorContext;
-import com.datatorrent.api.DefaultOutputPort;
-import com.datatorrent.api.InputOperator;
-import com.datatorrent.api.Operator.ActivationListener;
+import java.net.UnknownHostException;
+
+import org.slf4j.LoggerFactory;
 
 import com.mongodb.DBCursor;
 import com.mongodb.DBObject;
 import com.mongodb.MongoClient;
 
-import java.net.UnknownHostException;
-
-import org.slf4j.LoggerFactory;
+import com.datatorrent.api.Context.OperatorContext;
+import com.datatorrent.api.DefaultOutputPort;
+import com.datatorrent.api.InputOperator;
+import com.datatorrent.api.Operator.ActivationListener;
 
 /**
  * This is the base implementation of a MongoDB input operator.&nbsp;
@@ -69,7 +69,7 @@
   /**
    * This is the output port which emits tuples read from MongoDB.
    */
-  final public transient DefaultOutputPort<T> outputPort = new DefaultOutputPort<T>();
+  public final transient DefaultOutputPort<T> outputPort = new DefaultOutputPort<T>();
 
   /**
    * Any concrete class derived from this has to implement this method
@@ -100,8 +100,7 @@
       if (userName != null && passWord != null) {
         db.authenticate(userName, passWord.toCharArray());
       }
-    }
-    catch (UnknownHostException ex) {
+    } catch (UnknownHostException ex) {
       logger.debug(ex.toString());
     }
 
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBOutputOperator.java
index 3e79fc1..506b359 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBOutputOperator.java
@@ -16,12 +16,8 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.mongodb;
+package org.apache.apex.malhar.contrib.mongodb;
 
-import com.datatorrent.api.Context.OperatorContext;
-import com.datatorrent.api.DefaultInputPort;
-import com.datatorrent.api.Operator;
-import com.mongodb.*;
 import java.net.UnknownHostException;
 import java.nio.ByteBuffer;
 import java.nio.ByteOrder;
@@ -37,6 +33,16 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.mongodb.BasicDBObject;
+import com.mongodb.DBCollection;
+import com.mongodb.DBCursor;
+import com.mongodb.DBObject;
+import com.mongodb.MongoClient;
+
+import com.datatorrent.api.Context.OperatorContext;
+import com.datatorrent.api.DefaultInputPort;
+import com.datatorrent.api.Operator;
+
 /**
  * This is the base implementation for a non-transactional output operator for MongoDB.&nbsp;
  * Subclasses should implement the column mapping for writing tuples out to MongoDB.
@@ -124,8 +130,7 @@
 
       try {
         processTuple(tuple);
-      }
-      catch (Exception ex) {
+      } catch (Exception ex) {
         throw new RuntimeException("Exception during process tuple", ex);
       }
     }
@@ -145,8 +150,7 @@
     if (cursor.hasNext()) {
       Object obj = cursor.next().get(windowIdColumnName);
       lastWindowId = (Long)obj;
-    }
-    else {
+    } else {
       BasicDBObject doc = new BasicDBObject();
       doc.put(windowIdColumnName, (long)0);
 //      doc.put(applicationIdName, 0);
@@ -154,7 +158,7 @@
       maxWindowCollection.save(doc);
     }
 
-    logger.debug("last windowid: {}" , lastWindowId);
+    logger.debug("last windowid: {}", lastWindowId);
   }
 
   /**
@@ -172,8 +176,7 @@
     tupleId = 1;
     if (windowId < lastWindowId) {
       ignoreWindow = true;
-    }
-    else if (windowId == lastWindowId) {
+    } else if (windowId == lastWindowId) {
       ignoreWindow = false;
       BasicDBObject query = new BasicDBObject();
 //      query.put(windowIdColumnName, windowId);
@@ -184,14 +187,11 @@
       StringBuilder high = new StringBuilder();
       if (queryFunction == 1) {
         queryFunction1(bb, high, low);
-      }
-      else if (queryFunction == 2) {
+      } else if (queryFunction == 2) {
         queryFunction2(bb, high, low);
-      }
-      else if (queryFunction == 3) {
+      } else if (queryFunction == 3) {
         queryFunction3(bb, high, low);
-      }
-      else {
+      } else {
         throw new RuntimeException("unknown queryFunction type:" + queryFunction);
       }
 
@@ -200,8 +200,7 @@
       for (String table : tableList) {
         db.getCollection(table).remove(query);
       }
-    }
-    else {
+    } else {
       ignoreWindow = false;
     }
   }
@@ -249,13 +248,12 @@
         tableToDocumentList.put(table, new ArrayList<DBObject>());
         tableToDocument.put(table, new BasicDBObject());
       }
-    }
-    catch (UnknownHostException ex) {
+    } catch (UnknownHostException ex) {
       logger.debug(ex.toString());
     }
   }
 
-  abstract public void setColumnMapping(String[] mapping);
+  public abstract void setColumnMapping(String[] mapping);
 
   @Override
   public void teardown()
@@ -271,14 +269,11 @@
     bb.order(ByteOrder.BIG_ENDIAN);
     if (queryFunction == 1) {
       insertFunction1(bb);
-    }
-    else if (queryFunction == 2) {
+    } else if (queryFunction == 2) {
       insertFunction2(bb);
-    }
-    else if (queryFunction == 3) {
+    } else if (queryFunction == 3) {
       insertFunction3(bb);
-    }
-    else {
+    } else {
       throw new RuntimeException("unknown insertFunction type:" + queryFunction);
     }
 //    String str = Hex.encodeHexString(bb.array());
@@ -304,8 +299,7 @@
 
         db.getCollection(table).insert(docList);
         tableToDocumentList.put(table, new ArrayList<DBObject>());
-      }
-      else {
+      } else {
         tableToDocumentList.put(table, docList);
       }
     }
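
Review note: unchanged by this patch, but useful when reading the reformatted branches above: the window check implements replay protection against the last window id persisted in MongoDB. In sketch form, using the names from the diff:

    if (windowId < lastWindowId) {
      ignoreWindow = true;    // window already fully committed; skip replayed tuples
    } else if (windowId == lastWindowId) {
      ignoreWindow = false;   // window partially committed; remove its rows, then rewrite
      // ... build the (windowId, tupleId) query and remove partial inserts ...
    } else {
      ignoreWindow = false;   // new window; process normally
    }
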
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBPOJOOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBPOJOOutputOperator.java
index 242793b..5b4d73d 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBPOJOOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/MongoDBPOJOOutputOperator.java
@@ -16,11 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.mongodb;
-
-import com.datatorrent.lib.util.PojoUtils;
-import com.datatorrent.lib.util.PojoUtils.Getter;
-import com.mongodb.BasicDBObject;
+package org.apache.apex.malhar.contrib.mongodb;
 
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -28,8 +24,12 @@
 
 import javax.validation.constraints.NotNull;
 
+import org.apache.apex.malhar.lib.util.PojoUtils;
+import org.apache.apex.malhar.lib.util.PojoUtils.Getter;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 
+import com.mongodb.BasicDBObject;
+
 /**
  * Implementation of {@link MongoDBOutputOperator} that saves a POJO in the mongodb database. <br/>
  * <p>
@@ -114,8 +114,7 @@
     nestedKeys = new HashMap<String, String[]>();
   }
 
-  static
-  {
+  static {
     mapPrimitives.put("boolean", boolean.class);
     mapPrimitives.put("char", char.class);
     mapPrimitives.put("short", short.class);
@@ -131,12 +130,10 @@
     for (String fieldType: fieldTypes) {
       if (mapPrimitives.containsKey(fieldType)) {
         classTypes.add(mapPrimitives.get(fieldType));
-      }
-      else {
+      } else {
         try {
           classTypes.add(Class.forName(fieldType));
-        }
-        catch (ClassNotFoundException ex) {
+        } catch (ClassNotFoundException ex) {
           throw new RuntimeException(ex);
         }
       }
@@ -149,7 +146,7 @@
         nestedKeys.put(key, key.split("[.]"));
       }
       String table = tablenames.get(i);
-      if(!tableList.contains(table)){
+      if (!tableList.contains(table)) {
         tableList.add(table);
       }
     }
@@ -182,12 +179,10 @@
         }
         if (doc.containsField(subKeys[0])) {
           doc.append(subKeys[0], nestedDoc);
-        }
-        else {
+        } else {
           doc.put(subKeys[0], nestedDoc);
         }
-      }
-      else {
+      } else {
         if ((doc = tableToDocument.get(table)) == null) {
           doc = new BasicDBObject();
         }
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/package-info.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/package-info.java
index d148f1e..07c8e26 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/package-info.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/mongodb/package-info.java
@@ -20,4 +20,4 @@
  * MongoDB operators and utilities.
  */
 @org.apache.hadoop.classification.InterfaceStability.Evolving
-package com.datatorrent.contrib.mongodb;
+package org.apache.apex.malhar.contrib.mongodb;
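
Review note: for applications that depend on these classes, the rename is source-incompatible; the only change needed downstream is the import, e.g.:

    // before this patch:
    //   import com.datatorrent.contrib.mongodb.MongoDBOutputOperator;
    // after this patch:
    import org.apache.apex.malhar.contrib.mongodb.MongoDBOutputOperator;
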
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/AbstractMqttInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/AbstractMqttInputOperator.java
index 081f60c..2964b18 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/AbstractMqttInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/AbstractMqttInputOperator.java
@@ -16,17 +16,22 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.mqtt;
+package org.apache.apex.malhar.contrib.mqtt;
 
-import com.datatorrent.api.Context.OperatorContext;
-import com.datatorrent.api.InputOperator;
-import com.datatorrent.api.Operator.ActivationListener;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.ArrayBlockingQueue;
-import org.fusesource.mqtt.client.*;
+
+import org.fusesource.mqtt.client.BlockingConnection;
+import org.fusesource.mqtt.client.MQTT;
+import org.fusesource.mqtt.client.Message;
+import org.fusesource.mqtt.client.QoS;
+import org.fusesource.mqtt.client.Topic;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import com.datatorrent.api.Context.OperatorContext;
+import com.datatorrent.api.InputOperator;
+import com.datatorrent.api.Operator.ActivationListener;
 
 /**
  * This is the base implementation for an MQTT input operator.&nbsp;
@@ -196,8 +201,7 @@
             try {
               Message msg = connection.receive();
               holdingBuffer.add(msg);
-            }
-            catch (Exception ex) {
+            } catch (Exception ex) {
               LOG.error("Trouble receiving", ex);
             }
           }
@@ -205,8 +209,7 @@
 
       });
       thread.start();
-    }
-    catch (Exception ex) {
+    } catch (Exception ex) {
       LOG.error("Caught exception during activation: ", ex);
       throw new RuntimeException(ex);
     }
@@ -219,15 +222,12 @@
     try {
       thread.interrupt();
       thread.join();
-    }
-    catch (InterruptedException ex) {
+    } catch (InterruptedException ex) {
       LOG.error("interrupted");
-    }
-    finally {
+    } finally {
       try {
         connection.disconnect();
-      }
-      catch (Exception ex) {
+      } catch (Exception ex) {
         LOG.error("Caught exception during disconnect", ex);
       }
     }
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/AbstractMqttOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/AbstractMqttOutputOperator.java
index bfdf79c..c1b6948 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/AbstractMqttOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/AbstractMqttOutputOperator.java
@@ -16,15 +16,15 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.mqtt;
+package org.apache.apex.malhar.contrib.mqtt;
 
-import com.datatorrent.common.util.BaseOperator;
-import com.datatorrent.api.Context.OperatorContext;
 import javax.validation.constraints.NotNull;
 import org.fusesource.mqtt.client.BlockingConnection;
 import org.fusesource.mqtt.client.MQTT;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import com.datatorrent.api.Context.OperatorContext;
+import com.datatorrent.common.util.BaseOperator;
 
 /**
  * This is the base implementation of an MQTT output operator.&nbsp;
@@ -69,8 +69,7 @@
       }
       connection = client.blockingConnection();
       connection.connect();
-    }
-    catch (Throwable t) {
+    } catch (Throwable t) {
       throw new RuntimeException(t);
     }
   }
@@ -80,8 +79,8 @@
   {
     try {
       connection.disconnect();
-    }
-    catch (Exception ex) {
+    } catch (Exception ex) {
+      //ignore
     }
   }
 
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/AbstractSinglePortMqttInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/AbstractSinglePortMqttInputOperator.java
index 473201b..2bd756f 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/AbstractSinglePortMqttInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/AbstractSinglePortMqttInputOperator.java
@@ -16,10 +16,10 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.mqtt;
+package org.apache.apex.malhar.contrib.mqtt;
 
-import com.datatorrent.api.DefaultOutputPort;
 import org.fusesource.mqtt.client.Message;
+import com.datatorrent.api.DefaultOutputPort;
 
 /**
  * This is the base implementation for a single port MQTT input operator.&nbsp;
@@ -50,7 +50,7 @@
   /**
    * This output port emits tuples, which were extracted from MQTT messages.
    */
-  final public transient DefaultOutputPort<T> outputPort = new DefaultOutputPort<T>();
+  public final transient DefaultOutputPort<T> outputPort = new DefaultOutputPort<T>();
 
   /**
    * Any concrete class derived from AbstractSinglePortMqttInputOperator has to implement this method
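
Review note: a minimal sketch of such a concrete subclass under the new package (assuming the abstract method is getTuple(Message), as in other Malhar single-port input operators; the class and the payload decoding below are illustrative):

    import org.apache.apex.malhar.contrib.mqtt.AbstractSinglePortMqttInputOperator;
    import org.fusesource.mqtt.client.Message;

    public class StringMqttInputOperator extends AbstractSinglePortMqttInputOperator<String>
    {
      @Override
      public String getTuple(Message message)
      {
        return new String(message.getPayload()); // assumes UTF-8 text payloads
      }
    }
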
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/AbstractSinglePortMqttOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/AbstractSinglePortMqttOutputOperator.java
index 76d7955..fe63196 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/AbstractSinglePortMqttOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/AbstractSinglePortMqttOutputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.mqtt;
+package org.apache.apex.malhar.contrib.mqtt;
 
 import com.datatorrent.api.DefaultInputPort;
 
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/MqttClientConfig.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/MqttClientConfig.java
index e8f52df..e3ae2bf 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/MqttClientConfig.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/MqttClientConfig.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.mqtt;
+package org.apache.apex.malhar.contrib.mqtt;
 
 import org.fusesource.mqtt.client.QoS;
 
@@ -40,7 +40,7 @@
   private int connectionTimeout = 500;
   private int connectAttemptsMax = 1;
   private String host = "localhost";
-  private int port= 1883;
+  private int port = 1883;
 
   /**
    * Gets the MQTT client ID
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/package-info.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/package-info.java
index 0c94e16..e875aaf 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/package-info.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/mqtt/package-info.java
@@ -20,4 +20,4 @@
  * MQTT operators and utilities.
  */
 @org.apache.hadoop.classification.InterfaceStability.Evolving
-package com.datatorrent.contrib.mqtt;
+package org.apache.apex.malhar.contrib.mqtt;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/AbstractNiFiInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/AbstractNiFiInputOperator.java
index b040d87..28938e4 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/AbstractNiFiInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/AbstractNiFiInputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.nifi;
+package org.apache.apex.malhar.contrib.nifi;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -152,7 +152,8 @@
       do {
         tuples.add(createTuple(dataPacket));
         dataPacket = transaction.receive();
-      } while (dataPacket != null);
+      }
+      while (dataPacket != null);
 
       // confirm all of the expected data was received by comparing check-sums; this does not complete the transaction
       transaction.confirm();
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/AbstractNiFiOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/AbstractNiFiOutputOperator.java
index 53cf302..a879a9d 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/AbstractNiFiOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/AbstractNiFiOutputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.nifi;
+package org.apache.apex.malhar.contrib.nifi;
 
 import java.io.IOException;
 import java.util.List;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/AbstractNiFiSinglePortInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/AbstractNiFiSinglePortInputOperator.java
index c246de0..758b963 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/AbstractNiFiSinglePortInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/AbstractNiFiSinglePortInputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.nifi;
+package org.apache.apex.malhar.contrib.nifi;
 
 import java.util.List;
 
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/NiFiDataPacket.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/NiFiDataPacket.java
index 6ebe9d1..83c73ac 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/NiFiDataPacket.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/NiFiDataPacket.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.nifi;
+package org.apache.apex.malhar.contrib.nifi;
 
 import java.util.Map;
 
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/NiFiDataPacketBuilder.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/NiFiDataPacketBuilder.java
index 2ea3cd6..5aa562b 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/NiFiDataPacketBuilder.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/NiFiDataPacketBuilder.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.nifi;
+package org.apache.apex.malhar.contrib.nifi;
 
 import java.io.Serializable;
 
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/NiFiSinglePortInputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/NiFiSinglePortInputOperator.java
index 63c76f4..f133a93 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/NiFiSinglePortInputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/NiFiSinglePortInputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.nifi;
+package org.apache.apex.malhar.contrib.nifi;
 
 import java.io.IOException;
 import java.io.InputStream;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/NiFiSinglePortOutputOperator.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/NiFiSinglePortOutputOperator.java
index 5c5b08c..409e667 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/NiFiSinglePortOutputOperator.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/NiFiSinglePortOutputOperator.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.nifi;
+package org.apache.apex.malhar.contrib.nifi;
 
 import java.util.ArrayList;
 import java.util.List;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/StandardNiFiDataPacket.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/StandardNiFiDataPacket.java
index 60cfbe3..85aad60 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/StandardNiFiDataPacket.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/nifi/StandardNiFiDataPacket.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.nifi;
+package org.apache.apex.malhar.contrib.nifi;
 
 import java.io.Serializable;
 import java.util.Map;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/parquet/AbstractParquetFileReader.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/parquet/AbstractParquetFileReader.java
index 14ab918..2160303 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/parquet/AbstractParquetFileReader.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/parquet/AbstractParquetFileReader.java
@@ -16,11 +16,12 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.parquet;
+package org.apache.apex.malhar.contrib.parquet;
 
 import java.io.IOException;
 import java.io.InputStream;
 
+import org.apache.apex.malhar.lib.io.fs.AbstractFileInputOperator;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Path;
 import org.apache.parquet.example.data.Group;
@@ -30,7 +31,6 @@
 import org.apache.parquet.schema.MessageTypeParser;
 
 import com.datatorrent.api.Context.OperatorContext;
-import com.datatorrent.lib.io.fs.AbstractFileInputOperator;
 
 /**
  * Base implementation of ParquetFileReader. Reads Parquet files from input
@@ -48,7 +48,7 @@
   protected transient MessageType schema;
   /**
    * Parquet Schema as a string. E.g: message
-   * com.datatorrent.contrib.parquet.eventsEventRecord {required INT32
+   * org.apache.apex.malhar.contrib.parquet.eventsEventRecord {required INT32
    * event_id;required BINARY org_id (UTF8);required INT64 long_id;optional
    * BOOLEAN css_file_loaded;optional FLOAT float_val;optional DOUBLE
    * double_val;}
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/parquet/ParquetFilePOJOReader.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/parquet/ParquetFilePOJOReader.java
index 36c5b55..e489dae 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/parquet/ParquetFilePOJOReader.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/parquet/ParquetFilePOJOReader.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.parquet;
+package org.apache.apex.malhar.contrib.parquet;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -24,6 +24,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.lib.util.FieldInfo.SupportType;
+import org.apache.apex.malhar.lib.util.PojoUtils;
 import org.apache.commons.lang3.ClassUtils;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.parquet.example.data.Group;
@@ -34,8 +36,6 @@
 import com.datatorrent.api.Context;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.annotation.OutputPortFieldAnnotation;
-import com.datatorrent.lib.util.FieldInfo.SupportType;
-import com.datatorrent.lib.util.PojoUtils;
 
 /**
  * <p>
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/AbstractCsvParser.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/AbstractCsvParser.java
index b611c5d..cf4f746 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/AbstractCsvParser.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/AbstractCsvParser.java
@@ -16,29 +16,37 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.parser;
+package org.apache.apex.malhar.contrib.parser;
 
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
 import java.util.ArrayList;
 
 import javax.validation.constraints.NotNull;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.supercsv.cellprocessor.Optional;
+import org.supercsv.cellprocessor.ParseChar;
+import org.supercsv.cellprocessor.ParseDate;
+import org.supercsv.cellprocessor.ParseDouble;
+import org.supercsv.cellprocessor.ParseInt;
+import org.supercsv.cellprocessor.ParseLong;
+import org.supercsv.cellprocessor.ift.CellProcessor;
+import org.supercsv.io.ICsvReader;
 import org.supercsv.prefs.CsvPreference;
 
-import com.datatorrent.common.util.BaseOperator;
-import com.datatorrent.api.Context.OperatorContext;
-import com.datatorrent.api.DefaultInputPort;
-import com.datatorrent.api.DefaultOutputPort;
-import com.datatorrent.netlet.util.DTThrowable;
-import com.datatorrent.lib.util.ReusableStringReader;
-import java.io.*;
+import org.apache.apex.malhar.lib.util.ReusableStringReader;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.supercsv.cellprocessor.*;
-import org.supercsv.cellprocessor.ift.CellProcessor;
-import org.supercsv.io.*;
+
+import com.datatorrent.api.Context.OperatorContext;
+import com.datatorrent.api.DefaultInputPort;
+import com.datatorrent.api.DefaultOutputPort;
+import com.datatorrent.common.util.BaseOperator;
+import com.datatorrent.netlet.util.DTThrowable;
 
 /**
  *  This is a base implementation of a delimited data parser which can be extended to output
@@ -78,7 +86,7 @@
   public enum FIELD_TYPE
   {
     BOOLEAN, DOUBLE, INTEGER, FLOAT, LONG, SHORT, CHARACTER, STRING, DATE
-  };
+  }
 
   @NotNull
   private transient ReusableStringReader csvStringReader = new ReusableStringReader();
@@ -128,8 +136,7 @@
           logger.debug("data in loop is {}", data.toString());
           output.emit(data);
         }
-      }
-      catch (IOException ex) {
+      } catch (IOException ex) {
         throw new RuntimeException(ex);
       }
 
@@ -157,12 +164,10 @@
             field.setType(temp[1]);
             getFields().add(field);
           }
-        }
-        else {
+        } else {
           logger.debug("File containing fields and their data types does not exist.Please specify the fields and data type through properties of this operator.");
         }
-      }
-      catch (IOException ex) {
+      } catch (IOException ex) {
         DTThrowable.rethrow(ex);
       }
 
@@ -185,29 +190,21 @@
       properties[i] = getFields().get(i).name;
       if (type == FIELD_TYPE.DOUBLE) {
         processors[i] = new Optional(new ParseDouble());
-      }
-      else if (type == FIELD_TYPE.INTEGER) {
+      } else if (type == FIELD_TYPE.INTEGER) {
         processors[i] = new Optional(new ParseInt());
-      }
-      else if (type == FIELD_TYPE.FLOAT) {
+      } else if (type == FIELD_TYPE.FLOAT) {
         processors[i] = new Optional(new ParseDouble());
-      }
-      else if (type == FIELD_TYPE.LONG) {
+      } else if (type == FIELD_TYPE.LONG) {
         processors[i] = new Optional(new ParseLong());
-      }
-      else if (type == FIELD_TYPE.SHORT) {
+      } else if (type == FIELD_TYPE.SHORT) {
         processors[i] = new Optional(new ParseInt());
-      }
-      else if (type == FIELD_TYPE.STRING) {
+      } else if (type == FIELD_TYPE.STRING) {
         processors[i] = new Optional();
-      }
-      else if (type == FIELD_TYPE.CHARACTER) {
+      } else if (type == FIELD_TYPE.CHARACTER) {
         processors[i] = new Optional(new ParseChar());
-      }
-      else if (type == FIELD_TYPE.BOOLEAN) {
+      } else if (type == FIELD_TYPE.BOOLEAN) {
         processors[i] = new Optional(new ParseChar());
-      }
-      else if (type == FIELD_TYPE.DATE) {
+      } else if (type == FIELD_TYPE.DATE) {
         processors[i] = new Optional(new ParseDate("dd/MM/yyyy"));
       }
     }
@@ -219,8 +216,7 @@
   {
     try {
       csvReader.close();
-    }
-    catch (IOException e) {
+    } catch (IOException e) {
       DTThrowable.rethrow(e);
     }
   }
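
Review note: the else-if ladder reformatted above dispatches on the FIELD_TYPE enum; a switch would express the same mapping more compactly. Sketch only, preserving the current behavior including its quirks (FLOAT maps to ParseDouble, SHORT to ParseInt, BOOLEAN to ParseChar):

    switch (type) {
      case DOUBLE:
      case FLOAT:
        processors[i] = new Optional(new ParseDouble());
        break;
      case INTEGER:
      case SHORT:
        processors[i] = new Optional(new ParseInt());
        break;
      case LONG:
        processors[i] = new Optional(new ParseLong());
        break;
      case CHARACTER:
      case BOOLEAN:
        processors[i] = new Optional(new ParseChar());
        break;
      case DATE:
        processors[i] = new Optional(new ParseDate("dd/MM/yyyy"));
        break;
      case STRING:
      default:
        processors[i] = new Optional();
        break;
    }
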
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/CellProcessorBuilder.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/CellProcessorBuilder.java
index 292031e..4b1c491 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/CellProcessorBuilder.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/CellProcessorBuilder.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.parser;
+package org.apache.apex.malhar.contrib.parser;
 
 import java.util.Map;
 
@@ -38,10 +38,9 @@
 import org.supercsv.cellprocessor.ift.LongCellProcessor;
 import org.supercsv.util.CsvContext;
 
+import org.apache.apex.malhar.contrib.parser.Schema.FieldType;
 import org.apache.commons.lang3.StringUtils;
 
-import com.datatorrent.contrib.parser.Schema.FieldType;
-
 /**
  * Helper class with methods to generate CellProcessor objects. Cell processors
  * are an integral part of reading and writing with Super CSV - they automate
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/CommonLogParser.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/CommonLogParser.java
index 7490166..16ca6d5 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/CommonLogParser.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/CommonLogParser.java
@@ -48,30 +48,30 @@
 {
   private static final Logger logger = LoggerFactory.getLogger(CommonLogParser.class);
 
-  private String schema="{\n" +
-    "  \"fields\": [{\n" +
-    "    \"field\": \"host\",\n" +
-    "    \"regex\": \"^([0-9.]+)\"\n" +
-    "  }, {\n" +
-    "    \"field\": \"rfc931\",\n" +
-    "    \"regex\": \"(\\\\S+)\"\n" +
-    "  }, {\n" +
-    "    \"field\": \"username\",\n" +
-    "    \"regex\": \"(\\\\S+)\"\n" +
-    "  }, {\n" +
-    "    \"field\": \"datetime\",\n" +
-    "    \"regex\": \"\\\\[(.*?)\\\\]\"\n" +
-    "  },{\n" +
-    "    \"field\": \"request\",\n" +
-    "    \"regex\": \"\\\"((?:[^\\\"]|\\\")+)\\\"\"\n" +
-    "  },{\n" +
-    "    \"field\": \"statusCode\",\n" +
-    "    \"regex\": \"(\\\\d{3})\"\n" +
-    "  },{\n" +
-    "    \"field\": \"bytes\",\n" +
-    "    \"regex\": \"(\\\\d+|-)\"\n" +
-    "  }]\n" +
-    "}";
+  private String schema = "{\n" +
+      "  \"fields\": [{\n" +
+      "    \"field\": \"host\",\n" +
+      "    \"regex\": \"^([0-9.]+)\"\n" +
+      "  }, {\n" +
+      "    \"field\": \"rfc931\",\n" +
+      "    \"regex\": \"(\\\\S+)\"\n" +
+      "  }, {\n" +
+      "    \"field\": \"username\",\n" +
+      "    \"regex\": \"(\\\\S+)\"\n" +
+      "  }, {\n" +
+      "    \"field\": \"datetime\",\n" +
+      "    \"regex\": \"\\\\[(.*?)\\\\]\"\n" +
+      "  },{\n" +
+      "    \"field\": \"request\",\n" +
+      "    \"regex\": \"\\\"((?:[^\\\"]|\\\")+)\\\"\"\n" +
+      "  },{\n" +
+      "    \"field\": \"statusCode\",\n" +
+      "    \"regex\": \"(\\\\d{3})\"\n" +
+      "  },{\n" +
+      "    \"field\": \"bytes\",\n" +
+      "    \"regex\": \"(\\\\d+|-)\"\n" +
+      "  }]\n" +
+      "}";
 
 
   @Override
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/CsvParser.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/CsvParser.java
index ea406e9..a7da203 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/CsvParser.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/CsvParser.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.parser;
+package org.apache.apex.malhar.contrib.parser;
 
 import java.io.IOException;
 import java.util.List;
@@ -32,6 +32,10 @@
 import org.supercsv.io.CsvMapReader;
 import org.supercsv.prefs.CsvPreference;
 
+import org.apache.apex.malhar.contrib.parser.DelimitedSchema.Field;
+import org.apache.apex.malhar.lib.parser.Parser;
+import org.apache.apex.malhar.lib.util.KeyValPair;
+import org.apache.apex.malhar.lib.util.ReusableStringReader;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -40,10 +44,6 @@
 import com.datatorrent.api.AutoMetric;
 import com.datatorrent.api.Context.OperatorContext;
 import com.datatorrent.api.DefaultOutputPort;
-import com.datatorrent.contrib.parser.DelimitedSchema.Field;
-import com.datatorrent.lib.parser.Parser;
-import com.datatorrent.lib.util.KeyValPair;
-import com.datatorrent.lib.util.ReusableStringReader;
 import com.datatorrent.netlet.util.DTThrowable;
 
 /**
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/DelimitedSchema.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/DelimitedSchema.java
index 1285b33..745ad2f 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/DelimitedSchema.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/DelimitedSchema.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.parser;
+package org.apache.apex.malhar.contrib.parser;
 
 import java.io.IOException;
 import java.util.Collections;
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/FixedWidthParser.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/FixedWidthParser.java
index 716d3f6..90545ff 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/FixedWidthParser.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/FixedWidthParser.java
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package com.datatorrent.contrib.parser;
+package org.apache.apex.malhar.contrib.parser;
 
 import java.lang.reflect.Field;
 import java.text.DateFormat;
@@ -32,6 +32,9 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.lib.parser.Parser;
+import org.apache.apex.malhar.lib.util.KeyValPair;
+import org.apache.apex.malhar.lib.util.PojoUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.lang3.ClassUtils;
 
@@ -44,9 +47,6 @@
 import com.datatorrent.api.Context;
 import com.datatorrent.api.DefaultOutputPort;
 import com.datatorrent.api.Operator;
-import com.datatorrent.lib.parser.Parser;
-import com.datatorrent.lib.util.KeyValPair;
-import com.datatorrent.lib.util.PojoUtils;
 
 /**
  * Operator that parses a fixed width record against a specified schema <br>
@@ -144,7 +144,7 @@
         err.emit(new KeyValPair<>(incomingString, "Record length mis-match/shorter tuple"));
       }
       logger.error("Tuple could not be parsed. Reason Record length mis-match/shorter tuple. " +
-        "Expected length " + recordLength + " Actual length " + incomingString.length());
+          "Expected length " + recordLength + " Actual length " + incomingString.length());
       errorTupleCount++;
       return;
     }
@@ -153,7 +153,7 @@
         err.emit(new KeyValPair<>(incomingString, "Record length mis-match/longer tuple"));
       }
       logger.error("Tuple could not be parsed. Reason Record length mis-match/longer tuple. " +
-        "Expected length " + recordLength + " Actual length " + incomingString.length());
+          "Expected length " + recordLength + " Actual length " + incomingString.length());
       errorTupleCount++;
       return;
     }
@@ -272,7 +272,7 @@
     try {
       Field f = clazz.getDeclaredField(fieldName);
       FixedWidthParser.TypeInfo t = new FixedWidthParser.TypeInfo(f.getName(),
-        ClassUtils.primitiveToWrapper(f.getType()));
+          ClassUtils.primitiveToWrapper(f.getType()));
       t.setter = PojoUtils.createSetter(clazz, t.name, t.type);
       setters.add(t);
     } catch (NoSuchFieldException e) {
@@ -304,7 +304,7 @@
         FixedWidthSchema.Field currentField = fields.get(i);
         FixedWidthParser.TypeInfo typeInfo = setters.get(i);
         validateAndSetCurrentField(currentField,
-          values[i], typeInfo, pojoObject, toEmit);
+            values[i], typeInfo, pojoObject, toEmit);
       }
     } catch (StringIndexOutOfBoundsException e) {
       throw new RuntimeException("Record length and tuple length mismatch ", e);
@@ -325,7 +325,7 @@
    * @param toEmit the map to be emitted
    */
   private void validateAndSetCurrentField(FixedWidthSchema.Field currentField,
-    String value, FixedWidthParser.TypeInfo typeInfo, Object pojoObject, HashMap toEmit)
+      String value, FixedWidthParser.TypeInfo typeInfo, Object pojoObject, HashMap toEmit)
   {
     try {
       String fieldName = currentField.getName();
@@ -381,7 +381,7 @@
       throw new RuntimeException("Error parsing" + value + " to Integer type", e);
     } catch (ParseException e) {
       throw new RuntimeException("Error parsing" + value, e);
-    }catch (Exception e) {
+    } catch (Exception e) {
       throw new RuntimeException("Error setting " + value + " in the given class" + typeInfo.toString(), e);
     }
   }
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/FixedWidthSchema.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/FixedWidthSchema.java
index bd26f06..b691ab2 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/FixedWidthSchema.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/FixedWidthSchema.java
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package com.datatorrent.contrib.parser;
+package org.apache.apex.malhar.contrib.parser;
 
 import java.io.IOException;
 import java.util.Collections;
@@ -67,11 +67,11 @@
   /**
    * Default Alignment
    */
-  public static final String DEFAULT_ALIGNMENT= "left";
+  public static final String DEFAULT_ALIGNMENT = "left";
   /**
    * JSON key string for Field Alignment
    */
-  public static final String FIELD_ALIGNMENT ="alignment";
+  public static final String FIELD_ALIGNMENT = "alignment";
 
   public static final Logger logger = LoggerFactory.getLogger(FixedWidthSchema.class);
   /**
@@ -160,13 +160,13 @@
     for (int i = 0; i < fieldArray.length(); i++) {
       JSONObject obj = fieldArray.getJSONObject(i);
       Field field = new Field(obj.getString(NAME),
-        obj.getString(TYPE).toUpperCase(), obj.getInt(FIELD_LENGTH));
-      if(obj.has(FIELD_PADDING_CHARACTER)) {
+          obj.getString(TYPE).toUpperCase(), obj.getInt(FIELD_LENGTH));
+      if (obj.has(FIELD_PADDING_CHARACTER)) {
         field.setPadding(obj.getString(FIELD_PADDING_CHARACTER).charAt(0));
       } else {
         field.setPadding(globalPadding);
       }
-      if(obj.has(FIELD_ALIGNMENT)) {
+      if (obj.has(FIELD_ALIGNMENT)) {
         field.setAlignment(obj.getString(FIELD_ALIGNMENT));
       } else {
         field.setAlignment(globalAlignment);
@@ -255,8 +255,8 @@
       this.dateFormat = DEFAULT_DATE_FORMAT;
       this.trueValue = DEFAULT_TRUE_VALUE;
       this.falseValue = DEFAULT_FALSE_VALUE;
-      this.padding=' ';
-      this.alignment=DEFAULT_ALIGNMENT;
+      this.padding = ' ';
+      this.alignment = DEFAULT_ALIGNMENT;
     }
 
     /**
@@ -335,6 +335,7 @@
     {
       this.falseValue = falseValue;
     }
+
     /**
      * Get the field padding
      * @return padding gets the padding for the individual field.
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/JsonParser.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/JsonParser.java
index 1b81e6f..7b74d77 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/JsonParser.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/JsonParser.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.parser;
+package org.apache.apex.malhar.contrib.parser;
 
 import java.io.IOException;
 import java.util.Iterator;
@@ -26,6 +26,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.lib.parser.Parser;
+import org.apache.apex.malhar.lib.util.KeyValPair;
 import org.apache.hadoop.classification.InterfaceStability;
 
 import com.fasterxml.jackson.databind.DeserializationFeature;
@@ -42,8 +44,6 @@
 import com.datatorrent.api.AutoMetric;
 import com.datatorrent.api.Context.OperatorContext;
 import com.datatorrent.api.DefaultOutputPort;
-import com.datatorrent.lib.parser.Parser;
-import com.datatorrent.lib.util.KeyValPair;
 import com.datatorrent.netlet.util.DTThrowable;
 
 /**
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/LogParser.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/LogParser.java
index c9e06ba..9ba4ca6 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/LogParser.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/LogParser.java
@@ -23,6 +23,9 @@
 import org.codehaus.jettison.json.JSONException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
+import org.apache.apex.malhar.lib.parser.Parser;
+import org.apache.apex.malhar.lib.util.KeyValPair;
 import org.apache.commons.lang3.CharEncoding;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -30,8 +33,6 @@
 import com.datatorrent.api.AutoMetric;
 import com.datatorrent.api.Context;
 import com.datatorrent.api.DefaultOutputPort;
-import com.datatorrent.lib.parser.Parser;
-import com.datatorrent.lib.util.KeyValPair;
 
 /**
  * Operator that parses a log string tuple against the
@@ -203,7 +204,8 @@
    * Get log schema details (field, regex etc)
    * @return logSchemaDetails
    */
-  public LogSchemaDetails getLogSchemaDetails() {
+  public LogSchemaDetails getLogSchemaDetails()
+  {
     return logSchemaDetails;
   }
 
@@ -211,7 +213,8 @@
    * Set log schema details like (fields and regex)
    * @param logSchemaDetails
    */
-  public void setLogSchemaDetails(LogSchemaDetails logSchemaDetails) {
+  public void setLogSchemaDetails(LogSchemaDetails logSchemaDetails)
+  {
     this.logSchemaDetails = logSchemaDetails;
   }
 
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/LogSchemaDetails.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/LogSchemaDetails.java
index 1bf2ec4..7cb9148 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/LogSchemaDetails.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/LogSchemaDetails.java
@@ -18,16 +18,16 @@
  */
 package org.apache.apex.malhar.contrib.parser;
 
-import org.codehaus.jettison.json.JSONArray;
-import org.codehaus.jettison.json.JSONException;
-import org.codehaus.jettison.json.JSONObject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * <p>
@@ -87,7 +87,7 @@
     JSONObject jsonObject = new JSONObject(json);
     JSONArray fieldArray = jsonObject.getJSONArray("fields");
 
-    for(int i = 0; i < fieldArray.length(); i++) {
+    for (int i = 0; i < fieldArray.length(); i++) {
       JSONObject obj = fieldArray.getJSONObject(i);
       Field field = new Field(obj.getString("field"), obj.getString("regex"));
       this.fields.add(field);
@@ -101,7 +101,7 @@
   public void createPattern()
   {
     StringBuffer pattern = new StringBuffer();
-    for(Field field: this.getFields()) {
+    for (Field field: this.getFields()) {
       pattern.append(field.getRegex()).append(" ");
     }
     logger.info("Created pattern for parsing the log {}", pattern.toString().trim());
@@ -123,7 +123,7 @@
       if (m.find()) {
         int i = 1;
         logObject  = new JSONObject();
-        for(String field: this.getFieldNames()) {
+        for (String field: this.getFieldNames()) {
           if (i > count) {
             break;
           }
@@ -228,7 +228,7 @@
     @Override
     public String toString()
     {
-      return "Fields [name=" + name + ", regex=" + regex +"]";
+      return "Fields [name=" + name + ", regex=" + regex + "]";
     }
   }
 
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/RegexParser.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/RegexParser.java
index a68c928..2fe5010 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/RegexParser.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/RegexParser.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package com.datatorrent.contrib.parser;
+package org.apache.apex.malhar.contrib.parser;
 
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
@@ -29,6 +29,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.apex.malhar.lib.parser.Parser;
+import org.apache.apex.malhar.lib.util.KeyValPair;
 import org.apache.commons.beanutils.BeanUtils;
 import org.apache.commons.beanutils.ConversionException;
 import org.apache.commons.beanutils.ConvertUtils;
@@ -40,8 +42,6 @@
 import com.google.common.annotations.VisibleForTesting;
 
 import com.datatorrent.api.Context;
-import com.datatorrent.lib.parser.Parser;
-import com.datatorrent.lib.util.KeyValPair;
 
 /**
  * Operator that parses a tuple based on a regex pattern and populates a POJO matching the user-defined schema <br>
@@ -117,13 +117,13 @@
         Object object = ctor.newInstance();
 
         if (matcher.find()) {
-          for (int i = 0; i <= matcher.groupCount()-1; i++) {
+          for (int i = 0; i <= matcher.groupCount() - 1; i++) {
             if (delimitedParserSchema.getFields().get(i).getType() == DelimitedSchema.FieldType.DATE) {
               DateTimeConverter dtConverter = new DateConverter();
               dtConverter.setPattern((String)delimitedParserSchema.getFields().get(i).getConstraints().get(DelimitedSchema.DATE_FORMAT));
               ConvertUtils.register(dtConverter, Date.class);
             }
-            BeanUtils.setProperty(object, delimitedParserSchema.getFields().get(i).getName(), matcher.group(i+1));
+            BeanUtils.setProperty(object, delimitedParserSchema.getFields().get(i).getName(), matcher.group(i + 1));
           }
           patternMatched = true;
         }
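
Review note: while this loop is being touched for spacing, the bound could also be simplified: "i <= matcher.groupCount() - 1" is equivalent to the more conventional form below (not applied in this patch):

    for (int i = 0; i < matcher.groupCount(); i++) {
      // same body: register a DateConverter for DATE fields, then
      // BeanUtils.setProperty(object, fieldName(i), matcher.group(i + 1));
    }
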
diff --git a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/Schema.java b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/Schema.java
index 727db7f..2f0b3e1 100644
--- a/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/Schema.java
+++ b/contrib/src/main/java/org/apache/apex/malhar/contrib/parser/Schema.java
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package com.datatorrent.contrib.parser;
+package org.apache.apex.malhar.contrib.parser;
 
 import java.util.Collections;
 import java.util.LinkedList;
diff --git