[BAHIR-314] Add Bahir Flink release 1.1.0
diff --git a/site/docs/flink/1.0/documentation.md b/site/docs/flink/1.0/documentation.md
index 55b557f..4c8b3ef 100644
--- a/site/docs/flink/1.0/documentation.md
+++ b/site/docs/flink/1.0/documentation.md
@@ -1,7 +1,7 @@
 ---
 layout: page
-title: Extensions for Apache Flink (1.0.0-SNAPSHOT)
-description: Extensions for Apache Flink (1.0.0-SNAPSHOT)
+title: Extensions for Apache Flink (1.0)
+description: Extensions for Apache Flink (1.0)
 group: nav-right
 ---
 <!--
diff --git a/site/docs/flink/1.1.0/documentation.md b/site/docs/flink/1.1.0/documentation.md
new file mode 100644
index 0000000..e652510
--- /dev/null
+++ b/site/docs/flink/1.1.0/documentation.md
@@ -0,0 +1,50 @@
+---
+layout: page
+title: Extensions for Apache Flink (1.1.0)
+description: Extensions for Apache Flink (1.1.0)
+group: nav-right
+---
+<!--
+{% comment %}
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to you under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+{% endcomment %}
+-->
+
+{% include JB/setup %}
+
+### Apache Bahir Extensions for Apache Flink
+
+<br/>
+
+#### Streaming Connectors
+
+[ActiveMQ connector](../flink-streaming-activemq)
+
+[Akka connector](../flink-streaming-akka)
+
+[Flume connector](../flink-streaming-flume)
+
+[InfluxDB connector](../flink-streaming-influxdb)
+
+[InfluxDB2 connector](../flink-streaming-influxdb2) ![](/assets/themes/apache-clean/img/new-black.png){:height="36px" width="36px"}
+
+[Kudu connector](../flink-streaming-kudu) ![](/assets/themes/apache-clean/img/new-black.png){:height="36px" width="36px"}
+
+[Netty connector](../flink-streaming-netty)
+
+[Pinot connector](../flink-streaming-pinot) ![](/assets/themes/apache-clean/img/new-black.png){:height="36px" width="36px"}
+
+[Redis connector](../flink-streaming-redis)
diff --git a/site/docs/flink/1.1.0/flink-streaming-activemq.md b/site/docs/flink/1.1.0/flink-streaming-activemq.md
new file mode 100644
index 0000000..19cd126
--- /dev/null
+++ b/site/docs/flink/1.1.0/flink-streaming-activemq.md
@@ -0,0 +1,44 @@
+---
+layout: page
+title: Apache Flink Streaming Connector for ActiveMQ
+description: Apache Flink Streaming Connector for ActiveMQ
+group: nav-right
+---
+<!--
+{% comment %}
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to you under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+{% endcomment %}
+-->
+
+{% include JB/setup %}
+
+# Flink ActiveMQ Connector
+
+This connector provides a source and sink to [Apache ActiveMQ](http://activemq.apache.org/)™.
+To use this connector, add the following dependency to your project:
+
+    <dependency>
+      <groupId>org.apache.bahir</groupId>
+      <artifactId>flink-connector-activemq_2.11</artifactId>
+      <version>1.1.0</version>
+    </dependency>
+
+*Version Compatibility*: This module is compatible with ActiveMQ 5.14.0.
+
+Note that the streaming connectors are not part of the binary distribution of Flink. You need to link them into your job jar for cluster execution.
+See how to link with them for cluster execution [here](https://ci.apache.org/projects/flink/flink-docs-release-1.2/dev/linking.html).
+
+The source class is called `AMQSource`, and the sink is `AMQSink`.
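+
+As an illustration, a minimal job wiring both classes together might look roughly like the sketch below, assuming a `StreamExecutionEnvironment env`. The config builder names (`AMQSourceConfig.AMQSourceConfigBuilder`, `AMQSinkConfig.AMQSinkConfigBuilder`) and their setters are assumptions about the connector's API, not something stated on this page:
+
+    // assumed API: read from one ActiveMQ queue and write to another
+    ActiveMQConnectionFactory connectionFactory =
+        new ActiveMQConnectionFactory("tcp://localhost:61616");
+
+    AMQSourceConfig<String> sourceConfig = new AMQSourceConfig.AMQSourceConfigBuilder<String>()
+        .setConnectionFactory(connectionFactory)
+        .setDestinationName("source-queue")
+        .setDeserializationSchema(new SimpleStringSchema())
+        .build();
+
+    AMQSinkConfig<String> sinkConfig = new AMQSinkConfig.AMQSinkConfigBuilder<String>()
+        .setConnectionFactory(connectionFactory)
+        .setDestinationName("sink-queue")
+        .setSerializationSchema(new SimpleStringSchema())
+        .build();
+
+    DataStream<String> messages = env.addSource(new AMQSource<>(sourceConfig));
+    messages.addSink(new AMQSink<>(sinkConfig));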
diff --git a/site/docs/flink/1.1.0/flink-streaming-akka.md b/site/docs/flink/1.1.0/flink-streaming-akka.md
new file mode 100644
index 0000000..b85f7f6
--- /dev/null
+++ b/site/docs/flink/1.1.0/flink-streaming-akka.md
@@ -0,0 +1,66 @@
+---
+layout: page
+title: Apache Flink Streaming Connector for Akka
+description: Apache Flink Streaming Connector for Akka
+group: nav-right
+---
+<!--
+{% comment %}
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to you under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+{% endcomment %}
+-->
+
+{% include JB/setup %}
+
+# Flink Akka Connector
+
+This connector provides a sink to which [Akka](http://akka.io/) source actors in an ActorSystem can publish data.
+To use this connector, add the following dependency to your project:
+
+    <dependency>
+      <groupId>org.apache.bahir</groupId>
+      <artifactId>flink-connector-akka_2.11</artifactId>
+      <version>1.1.0</version>
+    </dependency>
+    
+*Version Compatibility*: This module is compatible with Akka 2.0+.
+
+Note that the streaming connectors are not part of the binary distribution of Flink. You need to link them into your job jar for cluster execution.
+See how to link with them for cluster execution [here](https://ci.apache.org/projects/flink/flink-docs-release-1.2/dev/linking.html).
+    
+## Configuration
+    
+The configuration for the receiver actor system in the Flink Akka connector can be created using the standard Typesafe `Config` (`com.typesafe.config.Config`) object.
+    
+To enable acknowledgements, the custom configuration `akka.remote.auto-ack` can be used.
+
+The user can set any of the default configurations allowed by Akka as well as custom configurations allowed by the connector.
+   
+A sample configuration can be defined as follows:
+    
+    String configFile = getClass().getClassLoader()
+          .getResource("feeder_actor.conf").getFile();
+    Config config = ConfigFactory.parseFile(new File(configFile));    
+    
+## Message Types
+    
+There are three different kinds of messages which the receiver actor in the Flink Akka connector can receive:
+    
+- message containing `Iterable<Object>` data
+   
+- message containing generic `Object` data
+   
+- message containing generic `Object` data and a `Timestamp` value passed as `Tuple2<Object, Long>`.
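+
+Putting this together, the data published by a feeder actor can be consumed on the Flink side through a source function. The sketch below assumes the connector's `AkkaSource` class, its `(actorName, publisherUrl, config)` constructor, and the publisher URL format; these are assumptions rather than something documented on this page:
+
+    // assumed API: subscribe to a remote feeder actor and stream its messages
+    String configFile = getClass().getClassLoader()
+          .getResource("feeder_actor.conf").getFile();
+    Config config = ConfigFactory.parseFile(new File(configFile));
+
+    DataStream<Object> stream = env.addSource(
+        new AkkaSource("receiverActor", "akka.tcp://feederActorSystem@localhost:5150/user/feederActor", config));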
diff --git a/site/docs/flink/1.1.0/flink-streaming-flume.md b/site/docs/flink/1.1.0/flink-streaming-flume.md
new file mode 100644
index 0000000..fff1917
--- /dev/null
+++ b/site/docs/flink/1.1.0/flink-streaming-flume.md
@@ -0,0 +1,47 @@
+---
+layout: page
+title: Apache Flink Streaming Connector for Apache Flume
+description: Apache Flink Streaming Connector for Apache Flume
+group: nav-right
+---
+<!--
+{% comment %}
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to you under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+{% endcomment %}
+-->
+
+{% include JB/setup %}
+
+# Flink Flume Connector
+
+This connector provides a sink that can send data to [Apache Flume](https://flume.apache.org/)™. To use this connector, add the
+following dependency to your project:
+
+    <dependency>
+      <groupId>org.apache.bahir</groupId>
+      <artifactId>flink-connector-flume_2.11</artifactId>
+      <version>1.1.0</version>
+    </dependency>
+
+*Version Compatibility*: This module is compatible with Flume 1.8.0.
+
+Note that the streaming connectors are not part of the binary distribution of Flink. You need to link them into your job jar for cluster execution.
+See how to link with them for cluster execution [here](https://ci.apache.org/projects/flink/flink-docs-release-1.2/dev/linking.html).
+
+To create a `FlumeSink` instantiate the following constructor:
+
+    FlumeSink(String host, int port, SerializationSchema<IN> schema)
+
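+For example, with a stream of strings and Flink's `SimpleStringSchema` (host and port below are placeholders):
+
+    DataStream<String> stream = ...;
+    stream.addSink(new FlumeSink<>("localhost", 4545, new SimpleStringSchema()));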
diff --git a/site/docs/flink/1.1.0/flink-streaming-influxdb.md b/site/docs/flink/1.1.0/flink-streaming-influxdb.md
new file mode 100644
index 0000000..7b03d94
--- /dev/null
+++ b/site/docs/flink/1.1.0/flink-streaming-influxdb.md
@@ -0,0 +1,59 @@
+---
+layout: page
+title: Apache Flink Streaming Connector for InfluxDB
+description: Apache Flink Streaming Connector for InfluxDB
+group: nav-right
+---
+<!--
+{% comment %}
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to you under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+{% endcomment %}
+-->
+
+{% include JB/setup %}
+
+# Flink InfluxDB Connector
+
+This connector provides a sink that can send data to [InfluxDB](https://www.influxdata.com/). To use this connector, add the
+following dependency to your project:
+
+    <dependency>
+      <groupId>org.apache.bahir</groupId>
+      <artifactId>flink-connector-influxdb_2.11</artifactId>
+      <version>1.1.0</version>
+    </dependency>
+
+*Version Compatibility*: This module is compatible with InfluxDB 1.3.x.  
+*Requirements*: Java 1.8+
+
+Note that the streaming connectors are not part of the binary distribution of Flink. You need to link them into your job jar for cluster execution.
+See how to link with them for cluster execution [here](https://ci.apache.org/projects/flink/flink-docs-release-1.3/dev/linking.html).
+ 
+## Installing InfluxDB
+Follow the instructions from the [InfluxDB download page](https://portal.influxdata.com/downloads#influxdb).
+  
+## Examples
+
+### Java API
+
+    DataStream<InfluxDBPoint> dataStream = ...;
+    InfluxDBConfig influxDBConfig = InfluxDBConfig.builder(String host, String username, String password, String dbName)
+        .build();
+    dataStream.addSink(new InfluxDBSink(influxDBConfig));
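+
+To produce the `InfluxDBPoint` elements themselves, a plain `map` operation can be used. The sketch below assumes the `InfluxDBPoint(measurement, timestamp, tags, fields)` constructor; `sensorStream`, its `reading` accessors, and the measurement and field names are placeholders:
+
+    DataStream<InfluxDBPoint> points = sensorStream.map(reading -> {
+        Map<String, String> tags = new HashMap<>();
+        tags.put("host", reading.getHost());
+
+        Map<String, Object> fields = new HashMap<>();
+        fields.put("value", reading.getValue());
+
+        // measurement name, timestamp in milliseconds, tags and fields
+        return new InfluxDBPoint("cpu_load", reading.getTimestamp(), tags, fields);
+    });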
+
+
+See end-to-end examples at [InfluxDB Examples](https://github.com/apache/bahir-flink/tree/master/flink-connector-influxdb/examples)
+
+
diff --git a/site/docs/flink/1.1.0/flink-streaming-influxdb2.md b/site/docs/flink/1.1.0/flink-streaming-influxdb2.md
new file mode 100644
index 0000000..86a8237
--- /dev/null
+++ b/site/docs/flink/1.1.0/flink-streaming-influxdb2.md
@@ -0,0 +1,234 @@
+---
+layout: page
+title: Apache Flink Streaming Connector for InfluxDB2
+description: Apache Flink Streaming Connector for InfluxDB2
+group: nav-right
+---
+<!--
+{% comment %}
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to you under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+{% endcomment %}
+-->
+
+{% include JB/setup %}
+
+# Flink InfluxDB Connector
+
+This connector provides a Source that parses the [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and a Sink that can write to [InfluxDB](https://www.influxdata.com/). The Source implements the unified [Data Source API](https://ci.apache.org/projects/flink/flink-docs-release-1.12/dev/stream/sources.html). Our sink implements the unified [Sink API](https://cwiki.apache.org/confluence/display/FLINK/FLIP-143%3A+Unified+Sink+API#FLIP143:UnifiedSinkAPI-SinkAPI).
+
+The InfluxDB Source serves as an output target for [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) (and compatible tools). Telegraf pushes data to the source. The process is push-based, so it is a stateless (non-replayable) source.
+
+![Flink InfluxDB Connector Architecture](media/connector-architecture.png)
+
+## Installation
+
+To use this connector, add the following dependency to your project:
+
+```xml
+<dependency>
+  <groupId>org.apache.bahir</groupId>
+  <artifactId>flink-connector-influxdb2_2.12</artifactId>
+  <version>1.1.0</version>
+</dependency>
+```
+
+Note that the streaming connectors are not part of the binary distribution of Flink. You need to shade them into your job jar for cluster execution. See how to link with them for cluster execution [here](https://ci.apache.org/projects/flink/flink-docs-release-1.12/dev/project-configuration.html#adding-connector-and-library-dependencies).
+
+## Compatibility
+
+This module is compatible with InfluxDB 2.x and InfluxDB 1.8+. See more information [here](https://github.com/influxdata/influxdb-client-java#influxdb-client-java).
+
+## Source
+
+The Source accepts data in the form of the [Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/). One HTTP server is started per source instance. It parses HTTP requests into our `DataPoint` class. Each `DataPoint` instance is deserialized by a user-provided implementation of our `InfluxDBDataPointDeserializer` and sent to the next Flink operator.
+
+When using Telegraf, use its [HTTP output plugin](https://docs.influxdata.com/telegraf/v1.17/plugins/#http):
+
+```toml
+[[outputs.http]]
+  url = "http://task-manager:8000/api/v2/write"
+  method = "POST"
+  data_format = "influx"
+```
+
+![Source Architecture](media/source-architecture.png)
+
+### Usage
+
+```java
+InfluxDBSource<Long> influxDBSource = InfluxDBSource.builder()
+        .setDeserializer(new TestDeserializer())
+        .build();
+
+// ...
+
+/**
+ * Implementation of InfluxDBDataPointDeserializer interface
+ * (dataPoint) -----> (element)
+ *  test,longValue=1 fieldKey="fieldValue" -----------> 1L
+ *  test,longValue=2 fieldKey="fieldValue" -----------> 2L
+ *  test,longValue=3 fieldKey="fieldValue" -----------> 3L
+ */
+class TestDeserializer implements InfluxDBDataPointDeserializer<Long> {
+    @Override
+    public Long deserialize(final DataPoint dataPoint) {
+        return (Long) dataPoint.getField("longValue");
+    }
+}
+```
+
+
+### Options
+
+| Option            | Description     | Default Value     |
+| ----------------- |-----------------|:-----------------:|
+| ENQUEUE_WAIT_TIME | The timeout in seconds for enqueuing an HTTP request to the queue. | 5 |
+| INGEST_QUEUE_CAPACITY | Size of the queue that buffers HTTP request data points before they are fetched. | 1000 |
+| MAXIMUM_LINES_PER_REQUEST | The maximum number of lines that should be parsed per HTTP request. | 10000 |
+| PORT | TCP port on which the source's HTTP server listens. | 8000 |
+
+### Supported Data Types in Field Set
+
+| Field Set     | Support       |
+| ------------- |:-------------:|
+|    Float      | ✅            |
+|    Integer    | ✅            |
+|    UInteger   | ❌            |
+|    String     | ✅            |
+|    Boolean    | ✅            |
+
+See the InfluxDB field set value [data types](https://docs.influxdata.com/influxdb/cloud/reference/syntax/line-protocol/#field-set).
+The parsing limitation is related to the Apache Druid project. For more information, see this [issue](https://github.com/apache/druid/issues/10993).
+
+
+## Sink
+
+The Sink writes data points to InfluxDB using the [InfluxDB Java Client](https://github.com/influxdata/influxdb-client-java). You provide the connection information (URL, username, password, bucket, and organization) and an implementation of `InfluxDBSchemaSerializer<IN>` generic interface. The implementation of the interface overrides the `serialize(IN element, Context context)` function. This function serializes incoming Flink elements of type `IN` to [Point](https://github.com/influxdata/influxdb-client-java/blob/master/client/src/main/java/com/influxdb/client/write/Point.java) objects.
+
+It is possible to write multiple data points to InfluxDB simultaneously by separating each point with a new line. Batching data points in this manner results in much higher performance. The batch size can be set through the `WRITE_BUFFER_SIZE` option. By default, the buffer size is set to 1000 and can be changed to any value using the `setWriteBufferSize(final int bufferSize)` method of the Sink builder class.
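+
+For example, the buffer size setter can simply be chained into the sink builder shown in the Usage section below (a sketch; apart from `setWriteBufferSize`, the builder calls are taken from that example):
+
+```java
+InfluxDBSink<Long> influxDBSink = InfluxDBSink.builder()
+        .setInfluxDBSchemaSerializer(new TestSerializer())
+        .setWriteBufferSize(2500) // batch 2500 points per write instead of the default 1000
+        .setInfluxDBUrl("http://localhost:8086")
+        .setInfluxDBUsername("admin")
+        .setInfluxDBPassword("admin")
+        .setInfluxDBBucket("default")
+        .setInfluxDBOrganization("influxdata")
+        .build();
+```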
+
+It is possible to write checkpoint data points to InfluxDB whenever Flink sets a checkpoint. To enable this functionality, you need to set the `WRITE_DATA_POINT_CHECKPOINT` flag to true (default is false). The checkpoint data point looks as follows:
+```text
+checkpoint checkpoint=flink <timestamp>
+```
+The timestamp refers to the latest element that Flink serializes.
+
+### Usage
+
+```java
+// The InfluxDB Sink uses the build pattern to create a Sink object
+InfluxDBSink<Long> influxDBSink = InfluxDBSink.builder()
+        .setInfluxDBSchemaSerializer(new TestSerializer())
+        .setInfluxDBUrl(getUrl())           // http://localhost:8086
+        .setInfluxDBUsername(getUsername()) // admin
+        .setInfluxDBPassword(getPassword()) // admin
+        .setInfluxDBBucket(getBucket())     // default
+        .setInfluxDBOrganization(getOrg())  // influxdata
+        .build();
+
+// ...
+
+/**
+ * Implementation of InfluxDBSchemaSerializer interface
+ * (element) -----> (dataPoint)
+ *  1L -----------> test,longValue=1 fieldKey="fieldValue"
+ *  2L -----------> test,longValue=2 fieldKey="fieldValue"
+ *  3L -----------> test,longValue=3 fieldKey="fieldValue"
+ */
+class TestSerializer implements InfluxDBSchemaSerializer<Long> {
+
+    @Override
+    public Point serialize(Long element, Context context) {
+        final Point dataPoint = new Point("test");
+        dataPoint.addTag("longValue", String.valueOf(element));
+        dataPoint.addField("fieldKey", "fieldValue");
+        return dataPoint;
+    }
+}
+```
+
+### Options
+
+| Option            | Description   | Default Value   |
+| ----------------- |-----------------|:-----------------:|
+| WRITE_DATA_POINT_CHECKPOINT | Determines if the checkpoint data point should be written to InfluxDB or not. | false |
+| WRITE_BUFFER_SIZE | Number of elements to buffer before writing them to InfluxDB. | 1000 |
+| INFLUXDB_URL | InfluxDB Connection URL. | ❌ |
+| INFLUXDB_USERNAME | InfluxDB username. | ❌ |
+| INFLUXDB_PASSWORD | InfluxDB password. | ❌ |
+| INFLUXDB_BUCKET | InfluxDB bucket. | ❌ |
+| INFLUXDB_ORGANIZATION | InfluxDB organization. | ❌ |
+
+## Building the connector
+
+The connector can be built using Maven:
+
+```bash
+mvn clean install -DskipTests -pl flink-connector-influxdb2
+```
+
+## Benchmarks
+
+Some basic benchmarks were conducted.
+
+### Source
+A data generator that sends line protocol in the form of HTTP requests to a REST endpoint was used for the source benchmarks.
+Throughput and latency were measured for a direct connection between the data generator and the InfluxDB source.
+A setup including Telegraf was used to benchmark the latency in a more realistic scenario.
+
+### Sink
+Flink's from-sequence source was used to generate data for the sink benchmark.
+Throughput was measured without any other Flink operators, whereas the latency was measured by adding a timestamp to the event using a map operator before the sink.
+This timestamp was then compared to the insertion timestamp set by InfluxDB itself.
+
+### Visualization
+
+The results of these benchmarks are visualized [here](https://docs.google.com/presentation/d/1apd_wys0OzaiifAisABFg4B7HCydbkZXpN0OFd6cjEg/edit?usp=sharing).
+
+
+## Usage and Deployment Example
+
+See [`Shark/flink-connector-influxdb-example`](https://github.com/Shark/flink-connector-influxdb-example) for an example showing you how to use and deploy the InfluxDB source and sink connectors in a Flink application on a Kubernetes cluster.
+
+## Future Work
+
+* [Source] Dynamic (unprivileged) ports for HTTP server
+* [Source] Integration with Kubernetes service discovery in conjunction with dynamic ports
+* [Source] Use multi-threaded HTTP server
+* [Sink] Flush write buffer after an inactivity timeout
+
+## Contributors
+
+<!-- ALL-CONTRIBUTORS-LIST:START - Do not remove or modify this section -->
+<table>
+  <tr class="noBorder">
+    <td class="noBorder" align="center">
+        <a href="https://github.com/1p4pk"><img class="roundImg"
+         src="https://avatars.githubusercontent.com/u/32157576?v=4?s=100" width="100px;"/><br /><sub><b>Leon Papke</b></sub>
+         </a>
+     </td>
+    <td class="noBorder" align="center">
+        <a href="https://github.com/raminqaf"><img class="roundImg" src="https://avatars.githubusercontent.com/u/20357405?v=4?s=100" width="100px;"/><br /><sub><b>Ramin Gharib</b></sub>
+        </a>
+    </td>
+    <td  class="noBorder" align="center">
+    <a href="https://github.com/Shark"><img class="roundImg"  src="https://avatars.githubusercontent.com/u/53632?v=4?s=100" width="100px;" alt=""/>        <br /><sub><b>Felix Seidel</b></sub></a>
+    </td>
+  </tr>
+</table>
+<!-- ALL-CONTRIBUTORS-LIST:END -->
+
+This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind are welcome!
diff --git a/site/docs/flink/1.1.0/flink-streaming-kudu.md b/site/docs/flink/1.1.0/flink-streaming-kudu.md
new file mode 100644
index 0000000..356adb1
--- /dev/null
+++ b/site/docs/flink/1.1.0/flink-streaming-kudu.md
@@ -0,0 +1,335 @@
+---
+layout: page
+title: Apache Flink Streaming Connector for Apache Kudu
+description: Apache Flink Streaming Connector for Apache Kudu
+group: nav-right
+---
+<!--
+{% comment %}
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to you under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+{% endcomment %}
+-->
+
+{% include JB/setup %}
+
+# Flink Kudu Connector
+
+This connector provides a source (`KuduInputFormat`), a sink/output
+(`KuduSink` and `KuduOutputFormat`, respectively),
+as well as a table source (`KuduTableSource`), an upsert table sink (`KuduTableSink`), and a catalog (`KuduCatalog`),
+to allow reading and writing to [Kudu](https://kudu.apache.org/).
+
+To use this connector, add the following dependency to your project:
+
+```xml
+<dependency>
+  <groupId>org.apache.bahir</groupId>
+  <artifactId>flink-connector-kudu_2.11</artifactId>
+  <version>1.1.0</version>
+</dependency>
+```
+
+*Version Compatibility*: This module is compatible with Apache Kudu *1.11.1* (last stable version) and Apache Flink 1.10+.
+
+Note that the streaming connectors are not part of the binary distribution of Flink. You need to link them into your job jar for cluster execution.
+See how to link with them for cluster execution [here](https://ci.apache.org/projects/flink/flink-docs-release-1.10/dev/projectsetup/dependencies.html).
+
+## Installing Kudu
+
+Follow the instructions from the [Kudu Installation Guide](https://kudu.apache.org/docs/installation.html).
+Optionally, you can use the docker images provided in dockers folder.
+
+## SQL and Table API
+
+The Kudu connector is fully integrated with the Flink Table and SQL APIs. Once we configure the Kudu catalog (see next section),
+we can start querying or inserting into existing Kudu tables using the Flink SQL or Table API.
+
+For more information about the possible queries, please check the [official documentation](https://ci.apache.org/projects/flink/flink-docs-release-1.10/dev/table/sql/).
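+
+For illustration, a couple of statements against the `TestTable` table defined later on this page might look like this (sketch; the table must already exist in the active Kudu catalog):
+
+```sql
+-- insert a row and read it back through the Kudu catalog
+INSERT INTO TestTable VALUES ('f', 's', 1);
+SELECT * FROM TestTable WHERE second = 's';
+```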
+
+### Kudu Catalog
+
+The connector comes with a catalog implementation to handle metadata about your Kudu setup and perform table management.
+By using the Kudu catalog, you can access all the tables already created in Kudu from Flink SQL queries. The Kudu catalog only
+allows users to create or access existing Kudu tables. Tables using other data sources must be defined in other catalogs such as
+in-memory catalog or Hive catalog.
+
+When using the SQL CLI you can easily add the Kudu catalog to your environment yaml file:
+
+```
+catalogs:
+  - name: kudu
+    type: kudu
+    kudu.masters: <host>:7051
+```
+Once the SQL CLI is started you can simply switch to the Kudu catalog by calling `USE CATALOG kudu;`
+
+You can also create and use the KuduCatalog directly in the Table environment:
+
+```java
+String KUDU_MASTERS = "host1:port1,host2:port2";
+KuduCatalog catalog = new KuduCatalog(KUDU_MASTERS);
+tableEnv.registerCatalog("kudu", catalog);
+tableEnv.useCatalog("kudu");
+```
+### DDL operations using SQL
+
+It is possible to manipulate Kudu tables using SQL DDL.
+
+When not using the Kudu catalog, the following additional properties must be specified in the `WITH` clause:
+
+* `'connector.type'='kudu'`
+* `'kudu.masters'='host1:port1,host2:port2,...'`: comma-delimited list of Kudu masters
+* `'kudu.table'='...'`: The table's name within the Kudu database.
+
+If you have registered and are using the Kudu catalog, these properties are handled automatically.
+
+To create a table, the additional properties `kudu.primary-key-columns` and `kudu.hash-columns` must be specified
+as comma-delimited lists. Optionally, you can set the `kudu.replicas` property (defaults to 1).
+Other properties, such as range partitioning, cannot be configured here - for more flexibility, please use
+`catalog.createTable` as described in [this](#creating-a-kudutable-directly-with-kuducatalog) section or create the table directly in Kudu.
+
+The `NOT NULL` constraint can be added to any of the column definitions.
+By setting a column as a primary key, it will automatically be created with the `NOT NULL` constraint.
+Hash columns must be a subset of primary key columns.
+
+Kudu Catalog
+
+```
+CREATE TABLE TestTable (
+  first STRING,
+  second STRING,
+  third INT NOT NULL
+) WITH (
+  'kudu.hash-columns' = 'first',
+  'kudu.primary-key-columns' = 'first,second'
+)
+```
+Other catalogs
+
+```
+CREATE TABLE TestTable (
+  first STRING,
+  second STRING,
+  third INT NOT NULL
+) WITH (
+  'connector.type' = 'kudu',
+  'kudu.masters' = '...',
+  'kudu.table' = 'TestTable',
+  'kudu.hash-columns' = 'first',
+  'kudu.primary-key-columns' = 'first,second'
+)
+```
+Renaming a table:
+
+```
+ALTER TABLE TestTable RENAME TO TestTableRen
+```
+Dropping a table:
+
+```sql
+DROP TABLE TestTableRen
+```
+#### Creating a KuduTable directly with KuduCatalog
+
+The KuduCatalog also exposes a simple `createTable` method that requires only a `KuduTableInfo` object,
+in which the table configuration, including schema, partitioning, replication, etc. can be specified.
+
+Use the `createTableIfNotExists` method, which takes a `ColumnSchemasFactory` and
+a `CreateTableOptionsFactory` parameter. These implement, respectively, `getColumnSchemas()`,
+returning a list of Kudu [ColumnSchema](https://kudu.apache.org/apidocs/org/apache/kudu/ColumnSchema.html) objects,
+and `getCreateTableOptions()`, returning a
+[CreateTableOptions](https://kudu.apache.org/apidocs/org/apache/kudu/client/CreateTableOptions.html) object.
+
+This example shows the creation of a table called `ExampleTable` with two columns,
+`first` being the primary key, and the configuration of replicas and hash partitioning.
+
+```java
+KuduTableInfo tableInfo = KuduTableInfo
+    .forTable("ExampleTable")
+    .createTableIfNotExists(
+        () ->
+            Lists.newArrayList(
+                new ColumnSchema
+                    .ColumnSchemaBuilder("first", Type.INT32)
+                    .key(true)
+                    .build(),
+                new ColumnSchema
+                    .ColumnSchemaBuilder("second", Type.STRING)
+                    .build()
+            ),
+        () -> new CreateTableOptions()
+            .setNumReplicas(1)
+            .addHashPartitions(Lists.newArrayList("first"), 2));
+
+catalog.createTable(tableInfo, false);
+```
+The example uses lambda expressions to implement the functional interfaces.
+
+Read more about Kudu schema design in the [Kudu docs](https://kudu.apache.org/docs/schema_design.html).
+
+### Supported data types
+
+
+| Flink/SQL      |      Kudu      |
+| ---------------- | :---------------: |
+| `STRING`       |     STRING     |
+| `BOOLEAN`      |      BOOL      |
+| `TINYINT`      |      INT8      |
+| `SMALLINT`     |      INT16      |
+| `INT`          |      INT32      |
+| `BIGINT`       |      INT64      |
+| `FLOAT`        |      FLOAT      |
+| `DOUBLE`       |     DOUBLE     |
+| `BYTES`        |     BINARY     |
+| `TIMESTAMP(3)` | UNIXTIME_MICROS |
+
+Note:
+
+* `TIMESTAMP`s are fixed to a precision of 3, and the corresponding Java conversion class is `java.sql.Timestamp`
+* `BINARY` and `VARBINARY` are not yet supported - use `BYTES`, which is a `VARBINARY(2147483647)`
+* `CHAR` and `VARCHAR` are not yet supported - use `STRING`, which is a `VARCHAR(2147483647)`
+* `DECIMAL` types are not yet supported
+
+### Lookup Cache
+
+The Kudu connector can be used in a temporal join as a lookup source (a.k.a. dimension table). Currently, only synchronous lookup mode is supported.
+
+By default, the lookup cache is not enabled. You can enable it by setting both `kudu.lookup.cache.max-rows` and `kudu.lookup.cache.ttl`.
+
+The lookup cache is used to improve the performance of temporal joins with the Kudu connector. By default, the lookup cache is not enabled, so all requests are sent to the external database. When the lookup cache is enabled, each process (i.e. TaskManager) holds a cache. Flink looks up the cache first and only sends requests to the external database on a cache miss, updating the cache with the returned rows. The oldest rows in the cache are expired when the cache reaches the maximum number of cached rows `kudu.lookup.cache.max-rows` or when a row exceeds the maximum time to live `kudu.lookup.cache.ttl`. The cached rows might not be the latest; users can tune `kudu.lookup.cache.ttl` to a smaller value for fresher data, but this may increase the number of requests sent to the database. So this is a balance between throughput and correctness.
+
+Reference: [Flink JDBC Connector](https://nightlies.apache.org/flink/flink-docs-release-1.15/docs/connectors/table/jdbc/#lookup-cache)
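+
+A lookup join against a Kudu-backed dimension table then uses the standard Flink SQL syntax, as sketched below; the `Orders` table, its columns, and the processing-time attribute `proc_time` are hypothetical:
+
+```sql
+SELECT o.order_id, d.third
+FROM Orders AS o
+JOIN TestTable FOR SYSTEM_TIME AS OF o.proc_time AS d
+  ON o.first = d.first
+```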
+
+
+### Known limitations
+
+* Data type limitations (see above).
+* SQL Create table: primary keys can only be set by the `kudu.primary-key-columns` property, using the
+  `PRIMARY KEY` constraint is not yet possible.
+* SQL Create table: range partitioning is not supported.
+* When getting a table through the Catalog, NOT NULL and PRIMARY KEY constraints are ignored. All columns
+  are described as being nullable, and not being primary keys.
+* Kudu tables cannot be altered through the catalog other than simple renaming.
+
+## DataStream API
+
+It is also possible to use the Kudu connector directly from the DataStream API; however, we
+encourage all users to explore the Table API, as it provides a lot of useful tooling when working
+with Kudu data.
+
+### Reading tables into a DataStream
+
+There are two main ways of reading a Kudu table into a DataStream:
+
+1. Using the `KuduCatalog` and the Table API
+2. Using the `KuduRowInputFormat` directly
+
+Using the `KuduCatalog` and Table API is the recommended way of reading tables as it automatically
+guarantees type safety and takes care of configuration of our readers.
+
+This is how it works in practice:
+
+```java
+StreamTableEnvironment tableEnv = StreamTableEnvironment.create(streamEnv, tableSettings);
+
+tableEnv.registerCatalog("kudu", new KuduCatalog("master:port"));
+tableEnv.useCatalog("kudu");
+
+Table table = tableEnv.sqlQuery("SELECT * FROM MyKuduTable");
+DataStream<Row> rows = tableEnv.toAppendStream(table, Row.class);
+```
+The second way of achieving the same thing is by using the `KuduRowInputFormat` directly.
+In this case we have to manually provide all information about our table:
+
+```java
+KuduTableInfo tableInfo = ...
+KuduReaderConfig readerConfig = ...
+KuduRowInputFormat inputFormat = new KuduRowInputFormat(readerConfig, tableInfo);
+
+DataStream<Row> rowStream = env.createInput(inputFormat, rowTypeInfo);
+```
+At the end of the day the `KuduTableSource` is just a convenient wrapper around the `KuduRowInputFormat`.
+
+### Kudu Sink
+
+The connector provides a `KuduSink` class that can be used to consume DataStreams
+and write the results into a Kudu table.
+
+The constructor takes 3 or 4 arguments.
+
+* `KuduWriterConfig` is used to specify the Kudu masters and the flush mode.
+* `KuduTableInfo` identifies the table to be written.
+* `KuduOperationMapper` maps the records coming from the DataStream to a list of Kudu operations.
+* `KuduFailureHandler` (optional): If you want to provide your own logic for handling writing failures.
+
+The example below shows the creation of a sink for Row type records of 3 fields. It upserts each record.
+It is assumed that a Kudu table with columns `col1, col2, col3` called `AlreadyExistingTable` exists. Note that if this were not the case,
+we could pass a `KuduTableInfo` as described in the [Creating a KuduTable directly with KuduCatalog](#creating-a-kudutable-directly-with-kuducatalog) section,
+and the sink would create the table with the provided configuration.
+
+```java
+KuduWriterConfig writerConfig = KuduWriterConfig.Builder.setMasters(KUDU_MASTERS).build();
+
+KuduSink<Row> sink = new KuduSink<>(
+    writerConfig,
+    KuduTableInfo.forTable("AlreadyExistingTable"),
+    new RowOperationMapper<>(
+            new String[]{"col1", "col2", "col3"},
+            AbstractSingleOperationMapper.KuduOperation.UPSERT)
+);
+```
+#### KuduOperationMapper
+
+This section describes the Operation mapping logic in more detail.
+
+The connector supports insert, upsert, update, and delete operations.
+The operation to be performed can vary dynamically based on the record.
+To allow for more flexibility, it is also possible for one record to trigger
+0, 1, or more operations.
+For the highest level of control, implement the `KuduOperationMapper` interface.
+
+If one record from the DataStream corresponds to one table operation,
+extend the `AbstractSingleOperationMapper` class. An array of column
+names must be provided. This must match the Kudu table's schema.
+
+The `getField` method must be overridden, which extracts the value for the table column whose name is
+at the `i`th place in the `columnNames` array.
+If the operation is one of (`CREATE, UPSERT, UPDATE, DELETE`)
+and doesn't depend on the input record (constant during the life of the sink), it can be set in the constructor
+of `AbstractSingleOperationMapper`.
+It is also possible to implement your own logic by overriding the
+`createBaseOperation` method that returns a Kudu [Operation](https://kudu.apache.org/apidocs/org/apache/kudu/client/Operation.html).
+
+There are pre-defined operation mappers for Pojo, Flink Row, and Flink Tuple types for constant-operation, 1-to-1 sinks.
+
+* `PojoOperationMapper`: Each table column must correspond to a POJO field
+  with the same name. The `columnNames` array should contain those fields of the POJO that
+  are present as table columns (the POJO fields can be a superset of table columns). See the sketch after this list.
+* `RowOperationMapper` and `TupleOperationMapper`: the mapping is based on position. The
+  `i`th field of the Row/Tuple corresponds to the column of the table at the `i`th
+  position in the `columnNames` array.
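+
+A minimal sketch of wiring a `PojoOperationMapper` into the sink follows. The `Customer` POJO, its table, and the exact `PojoOperationMapper` constructor arguments (POJO class, column names, operation) are illustrative assumptions rather than documented API:
+
+```java
+// POJO whose field names match the Kudu column names of an assumed "Customers" table
+public class Customer {
+    public int id;
+    public String name;
+}
+
+KuduSink<Customer> sink = new KuduSink<>(
+    writerConfig,
+    KuduTableInfo.forTable("Customers"),
+    new PojoOperationMapper<>(
+            Customer.class,
+            new String[]{"id", "name"},
+            AbstractSingleOperationMapper.KuduOperation.INSERT));
+```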
+
+## Building the connector
+
+The connector can be built using Maven:
+
+```
+cd bahir-flink
+mvn clean install
+```
+### Running the tests
+
+The integration tests rely on the Kudu test harness, which requires the current user to be able to ssh to localhost.
+
+This might not work out of the box on some operating systems (such as Mac OS X).
+To solve this problem, go to *System Preferences/Sharing* and enable Remote Login for your user.
diff --git a/site/docs/flink/1.1.0/flink-streaming-netty.md b/site/docs/flink/1.1.0/flink-streaming-netty.md
new file mode 100644
index 0000000..61bdf47
--- /dev/null
+++ b/site/docs/flink/1.1.0/flink-streaming-netty.md
@@ -0,0 +1,94 @@
+---
+layout: page
+title: Apache Flink Streaming Connector for Netty
+description: Apache Flink Streaming Connector for Netty
+group: nav-right
+---
+<!--
+{% comment %}
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to you under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+{% endcomment %}
+-->
+
+{% include JB/setup %}
+
+# Flink Netty Connector
+
+This connector provides a TCP source and an HTTP source for receiving pushed data, implemented with [Netty](http://netty.io).
+
+Note that the streaming connectors are not part of the binary distribution of Flink. You need to link them into your job jar for cluster execution.
+See how to link with them for cluster execution [here](https://ci.apache.org/projects/flink/flink-docs-release-1.2/dev/linking.html).
+
+## Data Flow
+
+```
++-------------+      (2)    +------------------------+
+| user system |    <-----   | Third Register Service |           
++-------------+             +------------------------+
+       |                                ^
+       | (3)                            |
+       |                                |
+       V                                |
++--------------------+                  |
+| Flink Netty Source |  ----------------+
++--------------------+         (1)
+```
+
+There are three components:
+
+*   User System - where the data stream is coming from
+*   Third Register Service - receives the `Flink Netty Source`'s registration request (ip and port)
+*   Flink Netty Source - a Netty server that receives pushed streaming data from the `User System`
+
+
+## Maven Dependency
+To use this connector, add the following dependency to your project:
+
+```
+<dependency>
+  <groupId>org.apache.bahir</groupId>
+  <artifactId>flink-connector-netty_2.11</artifactId>
+  <version>1.1.0</version>
+</dependency>
+```
+
+## Usage
+
+*Tcp Source:*
+
+```
+val env = StreamExecutionEnvironment.getExecutionEnvironment
+env.addSource(new TcpReceiverSource(7070, Some("http://localhost:9090/cb")))
+```
+>tryPort:   try to use this port; if it is already in use, try a new port
+>callbackUrl:   register the connector's ip and port to a `Third Register Service`
+
+*Http Source:*
+
+```
+val env = StreamExecutionEnvironment.getExecutionEnvironment
+env.addSource(new HttpReceiverSource("msg", 7070, Some("http://localhost:9090/cb")))
+```
+>paramKey:  the http query param key
+>tryPort:   try to use this port; if it is already in use, try a new port
+>callbackUrl:   register the connector's ip and port to a `Third Register Service`
+
+## Full Example 
+
+There are two examples to get started:
+
+*   [StreamSqlExample](https://github.com/apache/bahir-flink/blob/master/flink-connector-netty/src/test/scala/org/apache/flink/streaming/connectors/netty/example/StreamSqlExample.scala)
+*   [TcpSourceExample](https://github.com/apache/bahir-flink/blob/master/flink-connector-netty/src/test/scala/org/apache/flink/streaming/connectors/netty/example/TcpSourceExample.scala)
diff --git a/site/docs/flink/1.1.0/flink-streaming-pinot.md b/site/docs/flink/1.1.0/flink-streaming-pinot.md
new file mode 100644
index 0000000..533c471
--- /dev/null
+++ b/site/docs/flink/1.1.0/flink-streaming-pinot.md
@@ -0,0 +1,156 @@
+---
+layout: page
+title: Apache Flink Streaming Connector for Pinot
+description: Apache Flink Streaming Connector for Pinot
+group: nav-right
+---
+<!--
+{% comment %}
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to you under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+{% endcomment %}
+-->
+
+{% include JB/setup %}
+
+# Flink Pinot Connector
+
+This connector provides a sink to [Apache Pinot](http://pinot.apache.org/)™.  
+To use this connector, add the following dependency to your project:
+
+    <dependency>
+      <groupId>org.apache.bahir</groupId>
+      <artifactId>flink-connector-pinot_2.11</artifactId>
+      <version>1.1.0</version>
+    </dependency>
+
+*Version Compatibility*: This module is compatible with Pinot 0.6.0.
+
+Note that the streaming connectors are not part of the binary distribution of Flink. You need to link them into your job jar for cluster execution.
+See how to link with them for cluster execution [here](https://ci.apache.org/projects/flink/flink-docs-release-1.2/dev/linking.html).
+
+The sink class is called `PinotSink`.
+
+## Architecture
+
+The Pinot sink stores elements from upstream Flink tasks in an Apache Pinot table.
+We support two execution modes:
+* `RuntimeExecutionMode.BATCH`
+* `RuntimeExecutionMode.STREAMING`, which requires checkpointing to be enabled.
+
+### PinotSinkWriter
+
+Elements arriving from upstream tasks are handled by an instance of the `PinotSinkWriter`.
+The `PinotSinkWriter` holds a list of `PinotWriterSegment`s, where each `PinotWriterSegment` is capable of storing `maxRowsPerSegment` elements.
+As long as the maximum number of elements to hold has not been reached, a `PinotWriterSegment` is considered active.
+Once the maximum number of elements to hold has been reached, the active `PinotWriterSegment` is deactivated and a new, empty `PinotWriterSegment` is created.
+
+<img height="225" alt="PinotSinkWriter" src="docs/images/PinotSinkWriter.png">
+
+Thus, there is always one active `PinotWriterSegment` that new incoming elements will go to.
+Over time, the list of `PinotWriterSegment`s per `PinotSinkWriter` grows until a checkpoint is created.
+
+**Checkpointing**
+
+On checkpoint creation, `PinotSinkWriter.prepareCommit` gets called by the Flink environment.
+This triggers the creation of `PinotSinkCommittable`s, where each inactive `PinotWriterSegment` produces exactly one `PinotSinkCommittable`.
+
+<img height="250" alt="PinotSinkWriter prepareCommit" src="docs/images/PinotSinkWriter_prepareCommit.png">
+
+In order to create a `PinotSinkCommittable`, a file containing a `PinotWriterSegment`'s elements is written to the shared filesystem defined via `FileSystemAdapter`.
+The file contains a list of elements in JSON format. The serialization is done via `JSONSerializer`.
+A `PinotSinkCommittable` then holds the path to the data file on the shared filesystem as well as the minimum and maximum timestamp of all contained elements (extracted via `EventTimeExtractor`).
+
+
+### PinotSinkGlobalCommitter
+
+In order to be able to follow the guidelines for Pinot segment naming, we need to include the minimum and maximum timestamp of an element in the metadata of a segment and in its name.
+The minimum and maximum timestamp of all elements between two checkpoints are determined at a parallelism of 1 in the `PinotSinkGlobalCommitter`.
+This procedure allows recovering from failure by deleting previously uploaded segments, which prevents having duplicate segments in the Pinot table.
+
+<img height="250" alt="PinotSinkGlobalCommitter combine" src="docs/images/PinotSinkGlobalCommitter_combine.png">
+
+After all `PinotSinkWriter` subtasks emitted their `PinotSinkCommittable`s, they are sent to the `PinotSinkGlobalCommitter` which first combines all collected `PinotSinkCommittable`s into a single `PinotSinkGlobalCommittable`.
+To do so, the minimum and maximum timestamps of all collected `PinotSinkCommittable`s are determined.
+Moreover, the `PinotSinkGlobalCommittable` holds references to all data files from the `PinotSinkCommittable`s.
+
+When finally committing a `PinotSinkGlobalCommittable`, the following procedure is executed:
+* Read all data files from the shared filesystem (using `FileSystemAdapter`).
+* Generate Pinot segment names using `PinotSinkSegmentNameGenerator`.
+* Create Pinot segments with the minimum and maximum timestamps (stored in the `PinotSinkGlobalCommittable`) and the previously generated segment names assigned.
+* Upload the Pinot segments to the Pinot controller.
+
+
+## Delivery Guarantees
+
+Resulting from the architecture described above, the `PinotSink` provides an at-least-once delivery guarantee.
+While the failure recovery mechanism prevents duplicate segments, there might be temporary inconsistencies in the Pinot table, which can result in downstream tasks receiving an element multiple times.
+
+## Options
+
+| Option                 | Description                                                                      |
+| ---------------------- | -------------------------------------------------------------------------------- | 
+| `pinotControllerHost`  | Host of the Pinot controller                                                     |
+| `pinotControllerPort`  | Port of the Pinot controller                                                     |
+| `tableName`            | Target Pinot table's name                                                        |
+| `maxRowsPerSegment`    | Maximum number of rows to be stored within a Pinot segment                       |
+| `tempDirPrefix`         | Prefix for temp directories used                                                  |
+| `jsonSerializer`       | Serializer used to convert elements to JSON                                      |
+| `eventTimeExtractor`   | Defines the way event times are extracted from received objects                   |
+| `segmentNameGenerator` | Pinot segment name generator                                                     |
+| `fsAdapter`            | Filesystem adapter used to save files for sharing files across nodes               |
+| `numCommitThreads`     | Number of threads used in the `PinotSinkGlobalCommitter` for committing segments |
+
+## Usage
+
+```java
+StreamExecutionEnvironment env = ...
+// Checkpointing needs to be enabled when executing in STREAMING mode
+env.enableCheckpointing(long interval);
+
+DataStream<PinotRow> dataStream = ...
+PinotSink pinotSink = new PinotSink.Builder<PinotRow>(String pinotControllerHost, String pinotControllerPort, String tableName)
+
+    // Serializes a PinotRow to JSON format
+    .withJsonSerializer(JsonSerializer<PinotRow> jsonSerializer)
+
+    // Extracts the timestamp from a PinotRow
+    .withEventTimeExtractor(EventTimeExtractor<IN> eventTimeExtractor)
+
+    // Defines the segment name generation via the predefined SimpleSegmentNameGenerator
+    // Exemplary segment name: tableName_minTimestamp_maxTimestamp_segmentNamePostfix_0
+    .withSimpleSegmentNameGenerator(String tableName, String segmentNamePostfix)
+
+    // Use a custom segment name generator if the SimpleSegmentNameGenerator does not work for your use case
+    .withSegmentNameGenerator(SegmentNameGenerator segmentNameGenerator)
+
+    // Use a custom filesystem adapter.
+    // CAUTION: Make sure all nodes your Flink app runs on can access the shared filesystem via the provided FileSystemAdapter
+    .withFileSystemAdapter(FileSystemAdapter fsAdapter)
+
+    // Defines the size of the Pinot segments
+    .withMaxRowsPerSegment(int maxRowsPerSegment)
+
+    // Prefix within the local filesystem's temp directory used for storing intermediate files
+    .withTempDirectoryPrefix(String tempDirPrefix)
+
+    // Number of threads used in the PinotSinkGlobalCommitter to commit a batch of segments
+    // Optional - Default is 4
+    .withNumCommitThreads(int numCommitThreads)
+
+    // Builds the PinotSink
+    .build();
+
+dataStream.addSink(pinotSink);
+```
diff --git a/site/docs/flink/1.1.0/flink-streaming-redis.md b/site/docs/flink/1.1.0/flink-streaming-redis.md
new file mode 100644
index 0000000..edb4464
--- /dev/null
+++ b/site/docs/flink/1.1.0/flink-streaming-redis.md
@@ -0,0 +1,176 @@
+---
+layout: page
+title: Apache Flink Streaming Connector for Redis
+description: Apache Flink Streaming Connector for Redis
+group: nav-right
+---
+<!--
+{% comment %}
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to you under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+{% endcomment %}
+-->
+
+{% include JB/setup %}
+
+# Flink Redis Connector
+
+This connector provides a Sink that can write to [Redis](http://redis.io/) and also can publish data 
+to [Redis PubSub](http://redis.io/topics/pubsub). To use this connector, add the
+following dependency to your project:
+
+    <dependency>
+      <groupId>org.apache.bahir</groupId>
+      <artifactId>flink-connector-redis_2.11</artifactId>
+      <version>1.1.0</version>
+    </dependency>
+
+*Version Compatibility*: This module is compatible with Redis 2.8.5.
+
+Note that the streaming connectors are not part of the binary distribution of Flink. You need to link them into your job jar for cluster execution.
+See how to link with them for cluster execution [here](https://ci.apache.org/projects/flink/flink-docs-release-1.2/dev/linking.html).
+
+## Installing Redis
+
+Follow the instructions from the [Redis download page](http://redis.io/download).
+
+
+## Redis Sink
+
+A class providing an interface for sending data to Redis.
+The sink can use three different methods for communicating with different types of Redis environments:
+
+1. Single Redis Server
+2. Redis Cluster
+3. Redis Sentinel
+
+This code shows how to create a sink that communicates with a single Redis server:
+
+**Java:**
+
+
+    public static class RedisExampleMapper implements RedisMapper<Tuple2<String, String>>{
+
+        @Override
+        public RedisCommandDescription getCommandDescription() {
+            return new RedisCommandDescription(RedisCommand.HSET, "HASH_NAME");
+        }
+
+        @Override
+        public String getKeyFromData(Tuple2<String, String> data) {
+            return data.f0;
+        }
+
+        @Override
+        public String getValueFromData(Tuple2<String, String> data) {
+            return data.f1;
+        }
+    }
+    FlinkJedisPoolConfig conf = new FlinkJedisPoolConfig.Builder().setHost("127.0.0.1").build();
+
+    DataStream<String> stream = ...;
+    stream.addSink(new RedisSink<Tuple2<String, String>>(conf, new RedisExampleMapper()));
+
+
+
+**Scala:**
+
+    class RedisExampleMapper extends RedisMapper[(String, String)]{
+      override def getCommandDescription: RedisCommandDescription = {
+        new RedisCommandDescription(RedisCommand.HSET, "HASH_NAME")
+      }
+
+      override def getKeyFromData(data: (String, String)): String = data._1
+
+      override def getValueFromData(data: (String, String)): String = data._2
+    }
+    val conf = new FlinkJedisPoolConfig.Builder().setHost("127.0.0.1").build()
+    stream.addSink(new RedisSink[(String, String)](conf, new RedisExampleMapper))
+
+
+
+This example code does the same, but for Redis Cluster:
+
+**Java:**
+
+    FlinkJedisClusterConfig conf = new FlinkJedisClusterConfig.Builder()
+        .setNodes(new HashSet<InetSocketAddress>(Arrays.asList(new InetSocketAddress(5601)))).build();
+
+    DataStream<String> stream = ...;
+    stream.addSink(new RedisSink<Tuple2<String, String>>(conf, new RedisExampleMapper()));
+
+**Scala:**
+
+
+    val conf = new FlinkJedisClusterConfig.Builder().setNodes(...).build()
+    stream.addSink(new RedisSink[(String, String)](conf, new RedisExampleMapper))
+
+
+This example shows how to use the sink when the Redis environment uses Sentinels:
+
+**Java:**
+
+    FlinkJedisSentinelConfig conf = new FlinkJedisSentinelConfig.Builder()
+        .setMasterName("master").setSentinels(...).build();
+
+    DataStream<String> stream = ...;
+    stream.addSink(new RedisSink<Tuple2<String, String>>(conf, new RedisExampleMapper()));
+ 
+
+**Scala:**
+
+    val conf = new FlinkJedisSentinelConfig.Builder().setMasterName("master").setSentinels(...).build()
+    stream.addSink(new RedisSink[(String, String)](conf, new RedisExampleMapper))
+
+
+This section gives a description of all the available data types and which Redis command is used for each.
+
+<table class="table table-bordered" style="width: 75%">
+    <thead>
+        <tr>
+          <th class="text-center" style="width: 20%">Data Type</th>
+          <th class="text-center" style="width: 25%">Redis Command [Sink]</th>
+        </tr>
+      </thead>
+      <tbody>
+        <tr>
+            <td>HASH</td><td><a href="http://redis.io/commands/hset">HSET</a></td>
+        </tr>
+        <tr>
+            <td>LIST</td><td>
+                <a href="http://redis.io/commands/rpush">RPUSH</a>,
+                <a href="http://redis.io/commands/lpush">LPUSH</a>
+            </td>
+        </tr>
+        <tr>
+            <td>SET</td><td><a href="http://redis.io/commands/sadd">SADD</a></td>
+        </tr>
+        <tr>
+            <td>PUBSUB</td><td><a href="http://redis.io/commands/publish">PUBLISH</a></td>
+        </tr>
+        <tr>
+            <td>STRING</td><td><a href="http://redis.io/commands/set">SET</a></td>
+        </tr>
+        <tr>
+            <td>HYPER_LOG_LOG</td><td><a href="http://redis.io/commands/pfadd">PFADD</a></td>
+        </tr>
+        <tr>
+            <td>SORTED_SET</td><td><a href="http://redis.io/commands/zadd">ZADD</a></td>
+        </tr>
+        <tr>
+            <td>SORTED_SET</td><td><a href="http://redis.io/commands/zrem">ZREM</a></td>
+        </tr>
+      </tbody>
+</table>
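+
+For commands other than `HSET`, only the `RedisCommandDescription` changes. As a minimal, illustrative sketch (not part of the original example set; it assumes the single-argument `RedisCommandDescription` constructor, which is used when no additional key such as a hash name is required), a mapper that appends each value to a Redis list via `LPUSH` could look like this:
+
+**Java:**
+
+    public static class RedisListMapper implements RedisMapper<Tuple2<String, String>> {
+
+        @Override
+        public RedisCommandDescription getCommandDescription() {
+            // LPUSH needs no additional key, so the single-argument constructor is used (assumption noted above)
+            return new RedisCommandDescription(RedisCommand.LPUSH);
+        }
+
+        @Override
+        public String getKeyFromData(Tuple2<String, String> data) {
+            return data.f0;   // name of the Redis list
+        }
+
+        @Override
+        public String getValueFromData(Tuple2<String, String> data) {
+            return data.f1;   // element pushed onto the list
+        }
+    }
+
+The sink itself is created exactly as in the examples above, e.g. `stream.addSink(new RedisSink<Tuple2<String, String>>(conf, new RedisListMapper()));`.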
diff --git a/site/docs/flink/current/documentation.md b/site/docs/flink/current/documentation.md
index 969c97f..b18eb70 100644
--- a/site/docs/flink/current/documentation.md
+++ b/site/docs/flink/current/documentation.md
@@ -1,7 +1,7 @@
 ---
 layout: page
-title: Extensions for Apache Flink (1.1-SNAPSHOT)
-description: Extensions for Apache Flink (1.1-SNAPSHOT)
+title: Extensions for Apache Flink (1.2-SNAPSHOT)
+description: Extensions for Apache Flink (1.2-SNAPSHOT)
 group: nav-right
 ---
 <!--
@@ -39,12 +39,12 @@
 
 [InfluxDB connector](../flink-streaming-influxdb)
 
-[InfluxDB2 connector](../flink-streaming-influxdb2) ![](/assets/themes/apache-clean/img/new-black.png){:height="36px" width="36px"}
+[InfluxDB2 connector](../flink-streaming-influxdb2)
 
 [Kudu connector](../flink-streaming-kudu)
 
 [Netty connector](../flink-streaming-netty)
 
-[Pinot connector](../flink-streaming-pinot) ![](/assets/themes/apache-clean/img/new-black.png){:height="36px" width="36px"}
+[Pinot connector](../flink-streaming-pinot)
 
 [Redis connector](../flink-streaming-redis)
diff --git a/site/docs/flink/current/flink-streaming-activemq.md b/site/docs/flink/current/flink-streaming-activemq.md
index d94bf50..1e16002 100644
--- a/site/docs/flink/current/flink-streaming-activemq.md
+++ b/site/docs/flink/current/flink-streaming-activemq.md
@@ -33,7 +33,7 @@
     <dependency>
       <groupId>org.apache.bahir</groupId>
       <artifactId>flink-connector-activemq_2.11</artifactId>
-      <version>1.1-SNAPSHOT</version>
+      <version>1.2-SNAPSHOT</version>
     </dependency>
 
 *Version Compatibility*: This module is compatible with ActiveMQ 5.14.0.
diff --git a/site/docs/flink/current/flink-streaming-akka.md b/site/docs/flink/current/flink-streaming-akka.md
index 0af3e13..86a415e 100644
--- a/site/docs/flink/current/flink-streaming-akka.md
+++ b/site/docs/flink/current/flink-streaming-akka.md
@@ -33,7 +33,7 @@
     <dependency>
       <groupId>org.apache.bahir</groupId>
       <artifactId>flink-connector-akka_2.11</artifactId>
-      <version>1.1-SNAPSHOT</version>
+      <version>1.2-SNAPSHOT</version>
     </dependency>
     
 *Version Compatibility*: This module is compatible with Akka 2.0+.
diff --git a/site/docs/flink/current/flink-streaming-flume.md b/site/docs/flink/current/flink-streaming-flume.md
index c5e9a89..19cf545 100644
--- a/site/docs/flink/current/flink-streaming-flume.md
+++ b/site/docs/flink/current/flink-streaming-flume.md
@@ -33,7 +33,7 @@
     <dependency>
       <groupId>org.apache.bahir</groupId>
       <artifactId>flink-connector-flume_2.11</artifactId>
-      <version>1.1-SNAPSHOT</version>
+      <version>1.2-SNAPSHOT</version>
     </dependency>
 
 *Version Compatibility*: This module is compatible with Flume 1.8.0.
diff --git a/site/docs/flink/current/flink-streaming-influxdb.md b/site/docs/flink/current/flink-streaming-influxdb.md
index fe0d946..acb36eb 100644
--- a/site/docs/flink/current/flink-streaming-influxdb.md
+++ b/site/docs/flink/current/flink-streaming-influxdb.md
@@ -33,7 +33,7 @@
     <dependency>
       <groupId>org.apache.bahir</groupId>
       <artifactId>flink-connector-influxdb_2.11</artifactId>
-      <version>1.1-SNAPSHOT</version>
+      <version>1.2-SNAPSHOT</version>
     </dependency>
 
 *Version Compatibility*: This module is compatible with InfluxDB 1.3.x   
diff --git a/site/docs/flink/current/flink-streaming-influxdb2.md b/site/docs/flink/current/flink-streaming-influxdb2.md
index 5c4b99e..217bbd3 100644
--- a/site/docs/flink/current/flink-streaming-influxdb2.md
+++ b/site/docs/flink/current/flink-streaming-influxdb2.md
@@ -41,7 +41,7 @@
 <dependency>
   <groupId>org.apache.bahir</groupId>
   <artifactId>flink-connector-influxdb2_2.12</artifactId>
-  <version>1.1-SNAPSHOT</version>
+  <version>1.2-SNAPSHOT</version>
 </dependency>
 ```
 
diff --git a/site/docs/flink/current/flink-streaming-kudu.md b/site/docs/flink/current/flink-streaming-kudu.md
index 28dec2c..e1126f0 100644
--- a/site/docs/flink/current/flink-streaming-kudu.md
+++ b/site/docs/flink/current/flink-streaming-kudu.md
@@ -29,18 +29,17 @@
 
 This connector provides a source (```KuduInputFormat```), a sink/output
 (```KuduSink``` and ```KuduOutputFormat```, respectively),
- as well a table source (`KuduTableSource`), an upsert table sink (`KuduTableSink`), and a catalog (`KuduCatalog`),
- to allow reading and writing to [Kudu](https://kudu.apache.org/).
+as well as a table source (`KuduTableSource`), an upsert table sink (`KuduTableSink`), and a catalog (`KuduCatalog`),
+to allow reading and writing to [Kudu](https://kudu.apache.org/).
 
 To use this connector, add the following dependency to your project:
 
-    <dependency>
-      <groupId>org.apache.bahir</groupId>
-      <artifactId>flink-connector-kudu_2.11</artifactId>
-      <version>1.1-SNAPSHOT</version>
-    </dependency>
-
- *Version Compatibility*: This module is compatible with Apache Kudu *1.11.1* (last stable version) and Apache Flink 1.10.+.
+    <dependency>
+      <groupId>org.apache.bahir</groupId>
+      <artifactId>flink-connector-kudu_2.11</artifactId>
+      <version>1.2-SNAPSHOT</version>
+    </dependency>
+
+*Version Compatibility*: This module is compatible with Apache Kudu *1.11.1* (last stable version) and Apache Flink 1.10.+.
 
 Note that the streaming connectors are not part of the binary distribution of Flink. You need to link them into your job jar for cluster execution.
 See how to link with them for cluster execution [here](https://ci.apache.org/projects/flink/flink-docs-release-1.10/dev/projectsetup/dependencies.html).
@@ -72,7 +71,6 @@
     type: kudu
     kudu.masters: <host>:7051
 ```
-
 Once the SQL CLI is started you can simply switch to the Kudu catalog by calling `USE CATALOG kudu;`
 
 You can also create and use the KuduCatalog directly in the Table environment:
@@ -83,12 +81,12 @@
 tableEnv.registerCatalog("kudu", catalog);
 tableEnv.useCatalog("kudu");
 ```
-
 ### DDL operations using SQL
 
 It is possible to manipulate Kudu tables using SQL DDL.
 
 When not using the Kudu catalog, the following additional properties must be specified in the `WITH` clause:
+
 * `'connector.type'='kudu'`
 * `'kudu.masters'='host1:port1,host2:port2,...'`: comma-delimited list of Kudu masters
 * `'kudu.table'='...'`: The table's name within the Kudu database.
@@ -116,8 +114,8 @@
   'kudu.primary-key-columns' = 'first,second'
 )
 ```
-
 Other catalogs
+
 ```
 CREATE TABLE TestTable (
   first STRING,
@@ -131,17 +129,16 @@
   'kudu.primary-key-columns' = 'first,second'
 )
 ```
-
 Renaming a table:
+
 ```
 ALTER TABLE TestTable RENAME TO TestTableRen
 ```
-
 Dropping a table:
+
 ```sql
 DROP TABLE TestTableRen
 ```
-
 #### Creating a KuduTable directly with KuduCatalog
 
 The KuduCatalog also exposes a simple `createTable` method that requires only the table configuration,
@@ -150,7 +147,7 @@
 Use the `createTableIfNotExists` method, that takes a `ColumnSchemasFactory` and
 a `CreateTableOptionsFactory` parameter, that implement respectively `getColumnSchemas()`
 returning a list of Kudu [ColumnSchema](https://kudu.apache.org/apidocs/org/apache/kudu/ColumnSchema.html) objects;
- and  `getCreateTableOptions()` returning a
+and  `getCreateTableOptions()` returning a
 [CreateTableOptions](https://kudu.apache.org/apidocs/org/apache/kudu/client/CreateTableOptions.html) object.
 
 This example shows the creation of a table called `ExampleTable` with two columns,
@@ -182,32 +179,46 @@
 
 ### Supported data types
 
-| Flink/SQL            | Kudu                    |
-|----------------------|:-----------------------:|
-| `STRING`             | STRING                  |
-| `BOOLEAN`            | BOOL                    |
-| `TINYINT`            | INT8                    |
-| `SMALLINT`           | INT16                   |
-| `INT`                | INT32                   |
-| `BIGINT`             | INT64                   |
-| `FLOAT`              | FLOAT                   |
-| `DOUBLE`             | DOUBLE                  |
-| `BYTES`              | BINARY                  |
-| `TIMESTAMP(3)`       | UNIXTIME_MICROS         |
+
+| Flink/SQL      |      Kudu      |
+| ---------------- | :---------------: |
+| `STRING`       |     STRING     |
+| `BOOLEAN`      |      BOOL      |
+| `TINYINT`      |      INT8      |
+| `SMALLINT`     |      INT16      |
+| `INT`          |      INT32      |
+| `BIGINT`       |      INT64      |
+| `FLOAT`        |      FLOAT      |
+| `DOUBLE`       |     DOUBLE     |
+| `BYTES`        |     BINARY     |
+| `TIMESTAMP(3)` | UNIXTIME_MICROS |
 
 Note:
-* `TIMESTAMP`s are fixed to a precision of 3, and the corresponding Java conversion class is `java.sql.Timestamp` 
+
+* `TIMESTAMP`s are fixed to a precision of 3, and the corresponding Java conversion class is `java.sql.Timestamp`
 * `BINARY` and `VARBINARY` are not yet supported - use `BYTES`, which is a `VARBINARY(2147483647)`
-*  `CHAR` and `VARCHAR` are not yet supported - use `STRING`, which is a `VARCHAR(2147483647)`
+* `CHAR` and `VARCHAR` are not yet supported - use `STRING`, which is a `VARCHAR(2147483647)`
 * `DECIMAL` types are not yet supported
 
+### Lookup Cache
+
+The Kudu connector can be used as a lookup source (a.k.a. dimension table) in a temporal join. Currently, only synchronous lookup mode is supported.
+
+By default, the lookup cache is not enabled. You can enable it by setting both `kudu.lookup.cache.max-rows` and `kudu.lookup.cache.ttl`.
+
+The lookup cache is used to improve the performance of temporal joins against the Kudu connector. By default, the lookup cache is not enabled, so all requests are sent to the external database. When the lookup cache is enabled, each process (i.e. TaskManager) holds a cache. Flink looks up the cache first and only sends requests to the external database on a cache miss, updating the cache with the returned rows. The oldest rows in the cache are evicted when the cache reaches the maximum number of cached rows (`kudu.lookup.cache.max-rows`) or when a row exceeds the maximum time to live (`kudu.lookup.cache.ttl`). Cached rows might not be the latest; users can tune `kudu.lookup.cache.ttl` to a smaller value for fresher data, at the cost of more requests to the database. This is a trade-off between throughput and freshness.
+
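+As a minimal, illustrative sketch (the option names come from the paragraph above, but the table definition and the option value formats are assumptions rather than part of this documentation), the cache could be enabled in the table's `WITH` clause:
+
+```
+CREATE TABLE KuduDimTable (
+  id STRING,
+  name STRING
+) WITH (
+  'connector.type' = 'kudu',
+  'kudu.masters' = '<host>:7051',
+  'kudu.table' = 'DimTable',               -- hypothetical table name
+  'kudu.primary-key-columns' = 'id',
+  'kudu.lookup.cache.max-rows' = '10000',  -- maximum number of cached rows
+  'kudu.lookup.cache.ttl' = '60000'        -- time to live; the unit here is an assumption (milliseconds)
+)
+```
+
+Such a table would then typically be used as the dimension (lookup) table in a temporal join (`FOR SYSTEM_TIME AS OF ...`).
+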
+Reference: [Flink JDBC Connector](https://nightlies.apache.org/flink/flink-docs-release-1.15/docs/connectors/table/jdbc/#lookup-cache)
+
+
 ### Known limitations
+
 * Data type limitations (see above).
 * SQL Create table: primary keys can only be set by the `kudu.primary-key-columns` property, using the
-`PRIMARY KEY` constraint is not yet possible.
+  `PRIMARY KEY` constraint is not yet possible.
 * SQL Create table: range partitioning is not supported.
 * When getting a table through the Catalog, NOT NULL and PRIMARY KEY constraints are ignored. All columns
-are described as being nullable, and not being primary keys.
+  are described as being nullable, and not being primary keys.
 * Kudu tables cannot be altered through the catalog other than simple renaming
 
 ## DataStream API
@@ -219,13 +230,15 @@
 ### Reading tables into DataStreams
 
 There are 2 main ways of reading a Kudu Table into a DataStream
- 1. Using the `KuduCatalog` and the Table API
- 2. Using the `KuduRowInputFormat` directly
+
+1. Using the `KuduCatalog` and the Table API
+2. Using the `KuduRowInputFormat` directly
 
 Using the `KuduCatalog` and Table API is the recommended way of reading tables as it automatically
 guarantees type safety and takes care of configuration of our readers.
 
 This is how it works in practice:
+
 ```java
 StreamTableEnvironment tableEnv = StreamTableEnvironment.create(streamEnv, tableSettings);
 
@@ -235,7 +248,6 @@
 Table table = tableEnv.sqlQuery("SELECT * FROM MyKuduTable");
 DataStream<Row> rows = tableEnv.toAppendStream(table, Row.class);
 ```
-
 The second way of achieving the same thing is by using the `KuduRowInputFormat` directly.
 In this case we have to manually provide all information about our table:
 
@@ -246,18 +258,19 @@
 
 DataStream<Row> rowStream = env.createInput(inputFormat, rowTypeInfo);
 ```
-
 At the end of the day the `KuduTableSource` is just a convenient wrapper around the `KuduRowInputFormat`.
 
 ### Kudu Sink
+
 The connector provides a `KuduSink` class that can be used to consume DataStreams
 and write the results into a Kudu table.
 
 The constructor takes 3 or 4 arguments.
- * `KuduWriterConfig` is used to specify the Kudu masters and the flush mode.
- * `KuduTableInfo` identifies the table to be written
- * `KuduOperationMapper` maps the records coming from the DataStream to a list of Kudu operations.
- * `KuduFailureHandler` (optional): If you want to provide your own logic for handling writing failures.
+
+* `KuduWriterConfig` is used to specify the Kudu masters and the flush mode.
+* `KuduTableInfo` identifies the table to be written to.
+* `KuduOperationMapper` maps the records coming from the DataStream to a list of Kudu operations.
+* `KuduFailureHandler` (optional): lets you provide your own logic for handling write failures.
 
 The example below shows the creation of a sink for Row type records of 3 fields. It Upserts each record.
 It is assumed that a Kudu table with columns `col1, col2, col3` called `AlreadyExistingTable` exists. Note that if this were not the case,
@@ -275,7 +288,6 @@
             AbstractSingleOperationMapper.KuduOperation.UPSERT)
 )
 ```
-
 #### KuduOperationMapper
 
 This section describes the Operation mapping logic in more detail.
@@ -299,12 +311,13 @@
 `createBaseOperation` method that returns a Kudu [Operation](https://kudu.apache.org/apidocs/org/apache/kudu/client/Operation.html).
 
 There are pre-defined operation mappers for Pojo, Flink Row, and Flink Tuple types for constant operation, 1-to-1 sinks.
+
 * `PojoOperationMapper`: Each table column must correspond to a POJO field
-with the same name. The  `columnNames` array should contain those fields of the POJO that
-are present as table columns (the POJO fields can be a superset of table columns).
+  with the same name. The `columnNames` array should contain those fields of the POJO that
+  are present as table columns (the POJO fields can be a superset of the table columns); see the sketch after this list.
 * `RowOperationMapper` and `TupleOperationMapper`: the mapping is based on position. The
-`i`th field of the Row/Tuple corresponds to the column of the table at the `i`th
-position in the `columnNames` array.
+  `i`th field of the Row/Tuple corresponds to the column of the table at the `i`th
+  position in the `columnNames` array.
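+
+As a minimal, illustrative sketch of the POJO-based mapping (the `WeatherReading` POJO is hypothetical, and the exact `PojoOperationMapper` constructor signature is an assumption, so check the connector sources before relying on it):
+
+```java
+// Hypothetical POJO; field names match the Kudu table's column names.
+public class WeatherReading {
+    public String stationId;
+    public Double temperature;
+}
+
+// Assumed constructor order: POJO class, column names, operation type.
+KuduOperationMapper<WeatherReading> mapper = new PojoOperationMapper<>(
+        WeatherReading.class,
+        new String[]{"stationId", "temperature"},
+        AbstractSingleOperationMapper.KuduOperation.UPSERT);
+```
+
+The resulting mapper is passed to the `KuduSink` constructor in the same way as the `RowOperationMapper` shown earlier.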
 
 ## Building the connector
 
@@ -314,7 +327,6 @@
 cd bahir-flink
 mvn clean install
 ```
-
 ### Running the tests
 
 The integration tests rely on the Kudu test harness which requires the current user to be able to ssh to localhost.
diff --git a/site/docs/flink/current/flink-streaming-netty.md b/site/docs/flink/current/flink-streaming-netty.md
index 320537c..11bed0b 100644
--- a/site/docs/flink/current/flink-streaming-netty.md
+++ b/site/docs/flink/current/flink-streaming-netty.md
@@ -61,7 +61,7 @@
 <dependency>
   <groupId>org.apache.bahir</groupId>
   <artifactId>flink-connector-netty_2.11</artifactId>
-  <version>1.1-SNAPSHOT</version>
+  <version>1.2-SNAPSHOT</version>
 </dependency>
 ```
 
diff --git a/site/docs/flink/current/flink-streaming-pinot.md b/site/docs/flink/current/flink-streaming-pinot.md
index b4c9a7b..c25ad4a 100644
--- a/site/docs/flink/current/flink-streaming-pinot.md
+++ b/site/docs/flink/current/flink-streaming-pinot.md
@@ -33,7 +33,7 @@
     <dependency>
       <groupId>org.apache.bahir</groupId>
       <artifactId>flink-connector-pinot_2.11</artifactId>
-      <version>1.1-SNAPSHOT</version>
+      <version>1.2-SNAPSHOT</version>
     </dependency>
 
 *Version Compatibility*: This module is compatible with Pinot 0.6.0.
diff --git a/site/docs/flink/current/flink-streaming-redis.md b/site/docs/flink/current/flink-streaming-redis.md
index 0c646fb..5339ea5 100644
--- a/site/docs/flink/current/flink-streaming-redis.md
+++ b/site/docs/flink/current/flink-streaming-redis.md
@@ -34,7 +34,7 @@
     <dependency>
       <groupId>org.apache.bahir</groupId>
       <artifactId>flink-connector-redis_2.11</artifactId>
-      <version>1.1-SNAPSHOT</version>
+      <version>1.2-SNAPSHOT</version>
     </dependency>
 
 *Version Compatibility*: This module is compatible with Redis 2.8.5.
diff --git a/site/docs/flink/overview.md b/site/docs/flink/overview.md
index a6793b6..a7e2abe 100644
--- a/site/docs/flink/overview.md
+++ b/site/docs/flink/overview.md
@@ -27,5 +27,6 @@
 
 ### Apache Bahir Extensions for Apache Flink
 
- - [Current - 1.1-SNAPSHOT](/docs/flink/current/documentation)
+ - [Current - 1.2-SNAPSHOT](/docs/flink/current/documentation)
+ - [1.1.0](/docs/flink/1.1.0/documentation)
  - [1.0](/docs/flink/1.0/documentation)
diff --git a/site/index.md b/site/index.md
index e6181d4..6722892 100644
--- a/site/index.md
+++ b/site/index.md
@@ -50,10 +50,13 @@
  - Flink streaming connector for ActiveMQ
  - Flink streaming connector for Akka
  - Flink streaming connector for Flume
- - Flink streaming connector for InfluxDB ![](/assets/themes/apache-clean/img/new-black.png){:height="36px" width="36px"}
+ - Flink streaming connector for InfluxDB 
+ - Flink streaming connector for InfluxDB2 ![](/assets/themes/apache-clean/img/new-black.png){:height="36px" width="36px"}
  - Flink streaming connector for Kudu ![](/assets/themes/apache-clean/img/new-black.png){:height="36px" width="36px"}
  - Flink streaming connector for Redis
  - Flink streaming connector for Netty
+ - Flink streaming connector for Pinot ![](/assets/themes/apache-clean/img/new-black.png){:height="36px" width="36px"}
 
 
 The {{ site.data.project.name }} community welcomes the proposal of new extensions.