Rebuild website
diff --git a/content/2014/08/26/apache-flink-0.6-available/index.html b/content/2014/08/26/apache-flink-0.6-available/index.html
index c0ef3bc..56a8ad2 100644
--- a/content/2014/08/26/apache-flink-0.6-available/index.html
+++ b/content/2014/08/26/apache-flink-0.6-available/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2014/09/26/apache-flink-0.6.1-available/index.html b/content/2014/09/26/apache-flink-0.6.1-available/index.html
index 3f6775e..d97ad95 100644
--- a/content/2014/09/26/apache-flink-0.6.1-available/index.html
+++ b/content/2014/09/26/apache-flink-0.6.1-available/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2014/10/03/upcoming-events/index.html b/content/2014/10/03/upcoming-events/index.html
index bdcd8aa..ae8b69d 100644
--- a/content/2014/10/03/upcoming-events/index.html
+++ b/content/2014/10/03/upcoming-events/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2014/11/04/apache-flink-0.7.0-available/index.html b/content/2014/11/04/apache-flink-0.7.0-available/index.html
index 431dd99..e0b436f 100644
--- a/content/2014/11/04/apache-flink-0.7.0-available/index.html
+++ b/content/2014/11/04/apache-flink-0.7.0-available/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2014/11/18/hadoop-compatibility-in-flink/index.html b/content/2014/11/18/hadoop-compatibility-in-flink/index.html
index 46b9b5e..38989e3 100644
--- a/content/2014/11/18/hadoop-compatibility-in-flink/index.html
+++ b/content/2014/11/18/hadoop-compatibility-in-flink/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2015/01/06/december-2014-in-the-flink-community/index.html b/content/2015/01/06/december-2014-in-the-flink-community/index.html
index 38a2724..07e41bf 100644
--- a/content/2015/01/06/december-2014-in-the-flink-community/index.html
+++ b/content/2015/01/06/december-2014-in-the-flink-community/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2015/01/21/apache-flink-0.8.0-available/index.html b/content/2015/01/21/apache-flink-0.8.0-available/index.html
index 49c1ea8..3890c62 100644
--- a/content/2015/01/21/apache-flink-0.8.0-available/index.html
+++ b/content/2015/01/21/apache-flink-0.8.0-available/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2015/02/04/january-2015-in-the-flink-community/index.html b/content/2015/02/04/january-2015-in-the-flink-community/index.html
index 0d30fee..a6bc5fd 100644
--- a/content/2015/02/04/january-2015-in-the-flink-community/index.html
+++ b/content/2015/02/04/january-2015-in-the-flink-community/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2015/02/09/introducing-flink-streaming/index.html b/content/2015/02/09/introducing-flink-streaming/index.html
index 06be429..ea8c12b 100644
--- a/content/2015/02/09/introducing-flink-streaming/index.html
+++ b/content/2015/02/09/introducing-flink-streaming/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2015/03/02/february-2015-in-the-flink-community/index.html b/content/2015/03/02/february-2015-in-the-flink-community/index.html
index ab8414b..7007bbe 100644
--- a/content/2015/03/02/february-2015-in-the-flink-community/index.html
+++ b/content/2015/03/02/february-2015-in-the-flink-community/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2015/03/13/peeking-into-apache-flinks-engine-room/index.html b/content/2015/03/13/peeking-into-apache-flinks-engine-room/index.html
index a662b06..981ef68 100644
--- a/content/2015/03/13/peeking-into-apache-flinks-engine-room/index.html
+++ b/content/2015/03/13/peeking-into-apache-flinks-engine-room/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2015/04/07/march-2015-in-the-flink-community/index.html b/content/2015/04/07/march-2015-in-the-flink-community/index.html
index cef4cb9..6fa6440 100644
--- a/content/2015/04/07/march-2015-in-the-flink-community/index.html
+++ b/content/2015/04/07/march-2015-in-the-flink-community/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2015/04/13/announcing-flink-0.9.0-milestone1-preview-release/index.html b/content/2015/04/13/announcing-flink-0.9.0-milestone1-preview-release/index.html
index 0b0e813..d6f7275 100644
--- a/content/2015/04/13/announcing-flink-0.9.0-milestone1-preview-release/index.html
+++ b/content/2015/04/13/announcing-flink-0.9.0-milestone1-preview-release/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2015/05/11/juggling-with-bits-and-bytes/index.html b/content/2015/05/11/juggling-with-bits-and-bytes/index.html
index 6c65f57..0b1b75c 100644
--- a/content/2015/05/11/juggling-with-bits-and-bytes/index.html
+++ b/content/2015/05/11/juggling-with-bits-and-bytes/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2015/05/14/april-2015-in-the-flink-community/index.html b/content/2015/05/14/april-2015-in-the-flink-community/index.html
index 234cd08..eca54d4 100644
--- a/content/2015/05/14/april-2015-in-the-flink-community/index.html
+++ b/content/2015/05/14/april-2015-in-the-flink-community/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2015/06/24/announcing-apache-flink-0.9.0/index.html b/content/2015/06/24/announcing-apache-flink-0.9.0/index.html
index ba6f771..3060e3c 100644
--- a/content/2015/06/24/announcing-apache-flink-0.9.0/index.html
+++ b/content/2015/06/24/announcing-apache-flink-0.9.0/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2015/08/24/introducing-gelly-graph-processing-with-apache-flink/index.html b/content/2015/08/24/introducing-gelly-graph-processing-with-apache-flink/index.html
index 035b7a1..d363c6d 100644
--- a/content/2015/08/24/introducing-gelly-graph-processing-with-apache-flink/index.html
+++ b/content/2015/08/24/introducing-gelly-graph-processing-with-apache-flink/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2015/09/01/apache-flink-0.9.1-available/index.html b/content/2015/09/01/apache-flink-0.9.1-available/index.html
index 490dcc1..ce10274 100644
--- a/content/2015/09/01/apache-flink-0.9.1-available/index.html
+++ b/content/2015/09/01/apache-flink-0.9.1-available/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2015/09/03/announcing-flink-forward-2015/index.html b/content/2015/09/03/announcing-flink-forward-2015/index.html
index f5eb8fe..26588f1 100644
--- a/content/2015/09/03/announcing-flink-forward-2015/index.html
+++ b/content/2015/09/03/announcing-flink-forward-2015/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2015/09/16/off-heap-memory-in-apache-flink-and-the-curious-jit-compiler/index.html b/content/2015/09/16/off-heap-memory-in-apache-flink-and-the-curious-jit-compiler/index.html
index e7f50f8..54afb58 100644
--- a/content/2015/09/16/off-heap-memory-in-apache-flink-and-the-curious-jit-compiler/index.html
+++ b/content/2015/09/16/off-heap-memory-in-apache-flink-and-the-curious-jit-compiler/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2015/11/16/announcing-apache-flink-0.10.0/index.html b/content/2015/11/16/announcing-apache-flink-0.10.0/index.html
index 9078366..0dc8a80 100644
--- a/content/2015/11/16/announcing-apache-flink-0.10.0/index.html
+++ b/content/2015/11/16/announcing-apache-flink-0.10.0/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2015/11/27/flink-0.10.1-released/index.html b/content/2015/11/27/flink-0.10.1-released/index.html
index 3ebc906..fb340c1 100644
--- a/content/2015/11/27/flink-0.10.1-released/index.html
+++ b/content/2015/11/27/flink-0.10.1-released/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2015/12/04/introducing-stream-windows-in-apache-flink/index.html b/content/2015/12/04/introducing-stream-windows-in-apache-flink/index.html
index ac0827c..3ac451c 100644
--- a/content/2015/12/04/introducing-stream-windows-in-apache-flink/index.html
+++ b/content/2015/12/04/introducing-stream-windows-in-apache-flink/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2015/12/11/storm-compatibility-in-apache-flink-how-to-run-existing-storm-topologies-on-flink/index.html b/content/2015/12/11/storm-compatibility-in-apache-flink-how-to-run-existing-storm-topologies-on-flink/index.html
index 751b856..a34ca89 100644
--- a/content/2015/12/11/storm-compatibility-in-apache-flink-how-to-run-existing-storm-topologies-on-flink/index.html
+++ b/content/2015/12/11/storm-compatibility-in-apache-flink-how-to-run-existing-storm-topologies-on-flink/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2015/12/18/flink-2015-a-year-in-review-and-a-lookout-to-2016/index.html b/content/2015/12/18/flink-2015-a-year-in-review-and-a-lookout-to-2016/index.html
index 72dbbb8..2e73328 100644
--- a/content/2015/12/18/flink-2015-a-year-in-review-and-a-lookout-to-2016/index.html
+++ b/content/2015/12/18/flink-2015-a-year-in-review-and-a-lookout-to-2016/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2016/02/11/flink-0.10.2-released/index.html b/content/2016/02/11/flink-0.10.2-released/index.html
index 60bdb1a..03c6f9e 100644
--- a/content/2016/02/11/flink-0.10.2-released/index.html
+++ b/content/2016/02/11/flink-0.10.2-released/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2016/03/08/announcing-apache-flink-1.0.0/index.html b/content/2016/03/08/announcing-apache-flink-1.0.0/index.html
index d05c959..71b221e 100644
--- a/content/2016/03/08/announcing-apache-flink-1.0.0/index.html
+++ b/content/2016/03/08/announcing-apache-flink-1.0.0/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2016/04/06/flink-1.0.1-released/index.html b/content/2016/04/06/flink-1.0.1-released/index.html
index b3237cd..ba246a6 100644
--- a/content/2016/04/06/flink-1.0.1-released/index.html
+++ b/content/2016/04/06/flink-1.0.1-released/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2016/04/06/introducing-complex-event-processing-cep-with-apache-flink/index.html b/content/2016/04/06/introducing-complex-event-processing-cep-with-apache-flink/index.html
index 6933807..9291c7c 100644
--- a/content/2016/04/06/introducing-complex-event-processing-cep-with-apache-flink/index.html
+++ b/content/2016/04/06/introducing-complex-event-processing-cep-with-apache-flink/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2016/04/14/flink-forward-2016-call-for-submissions-is-now-open/index.html b/content/2016/04/14/flink-forward-2016-call-for-submissions-is-now-open/index.html
index 56b9bb1..8f06b2c 100644
--- a/content/2016/04/14/flink-forward-2016-call-for-submissions-is-now-open/index.html
+++ b/content/2016/04/14/flink-forward-2016-call-for-submissions-is-now-open/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2016/04/22/flink-1.0.2-released/index.html b/content/2016/04/22/flink-1.0.2-released/index.html
index 2dfe198..dd9c2f0 100644
--- a/content/2016/04/22/flink-1.0.2-released/index.html
+++ b/content/2016/04/22/flink-1.0.2-released/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2016/05/11/flink-1.0.3-released/index.html b/content/2016/05/11/flink-1.0.3-released/index.html
index d912145..650e917 100644
--- a/content/2016/05/11/flink-1.0.3-released/index.html
+++ b/content/2016/05/11/flink-1.0.3-released/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2016/05/24/stream-processing-for-everyone-with-sql-and-apache-flink/index.html b/content/2016/05/24/stream-processing-for-everyone-with-sql-and-apache-flink/index.html
index 5c7f62d..c0b04b2 100644
--- a/content/2016/05/24/stream-processing-for-everyone-with-sql-and-apache-flink/index.html
+++ b/content/2016/05/24/stream-processing-for-everyone-with-sql-and-apache-flink/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2016/08/04/announcing-apache-flink-1.1.0/index.html b/content/2016/08/04/announcing-apache-flink-1.1.0/index.html
index a7e3bf8..072a79a 100644
--- a/content/2016/08/04/announcing-apache-flink-1.1.0/index.html
+++ b/content/2016/08/04/announcing-apache-flink-1.1.0/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2016/08/04/flink-1.1.1-released/index.html b/content/2016/08/04/flink-1.1.1-released/index.html
index cde16a2..5343ca7 100644
--- a/content/2016/08/04/flink-1.1.1-released/index.html
+++ b/content/2016/08/04/flink-1.1.1-released/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2016/08/24/flink-forward-2016-announcing-schedule-keynotes-and-panel-discussion/index.html b/content/2016/08/24/flink-forward-2016-announcing-schedule-keynotes-and-panel-discussion/index.html
index dfcc40c..c0c8010 100644
--- a/content/2016/08/24/flink-forward-2016-announcing-schedule-keynotes-and-panel-discussion/index.html
+++ b/content/2016/08/24/flink-forward-2016-announcing-schedule-keynotes-and-panel-discussion/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2016/09/05/apache-flink-1.1.2-released/index.html b/content/2016/09/05/apache-flink-1.1.2-released/index.html
index b0b2fe0..b8c88c0 100644
--- a/content/2016/09/05/apache-flink-1.1.2-released/index.html
+++ b/content/2016/09/05/apache-flink-1.1.2-released/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2016/10/12/apache-flink-1.1.3-released/index.html b/content/2016/10/12/apache-flink-1.1.3-released/index.html
index f7b9a2f..7e0be51 100644
--- a/content/2016/10/12/apache-flink-1.1.3-released/index.html
+++ b/content/2016/10/12/apache-flink-1.1.3-released/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2016/12/19/apache-flink-in-2016-year-in-review/index.html b/content/2016/12/19/apache-flink-in-2016-year-in-review/index.html
index 7efd622..89fcad0 100644
--- a/content/2016/12/19/apache-flink-in-2016-year-in-review/index.html
+++ b/content/2016/12/19/apache-flink-in-2016-year-in-review/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2016/12/21/apache-flink-1.1.4-released/index.html b/content/2016/12/21/apache-flink-1.1.4-released/index.html
index d0fe6a3..939615b 100644
--- a/content/2016/12/21/apache-flink-1.1.4-released/index.html
+++ b/content/2016/12/21/apache-flink-1.1.4-released/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2017/02/06/announcing-apache-flink-1.2.0/index.html b/content/2017/02/06/announcing-apache-flink-1.2.0/index.html
index 9060622..a757b42 100644
--- a/content/2017/02/06/announcing-apache-flink-1.2.0/index.html
+++ b/content/2017/02/06/announcing-apache-flink-1.2.0/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2017/03/23/apache-flink-1.1.5-released/index.html b/content/2017/03/23/apache-flink-1.1.5-released/index.html
index 2c95a35..d5eee59 100644
--- a/content/2017/03/23/apache-flink-1.1.5-released/index.html
+++ b/content/2017/03/23/apache-flink-1.1.5-released/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2017/03/29/from-streams-to-tables-and-back-again-an-update-on-flinks-table-sql-api/index.html b/content/2017/03/29/from-streams-to-tables-and-back-again-an-update-on-flinks-table-sql-api/index.html
index 61f85f1..a0bcd63 100644
--- a/content/2017/03/29/from-streams-to-tables-and-back-again-an-update-on-flinks-table-sql-api/index.html
+++ b/content/2017/03/29/from-streams-to-tables-and-back-again-an-update-on-flinks-table-sql-api/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2017/03/30/continuous-queries-on-dynamic-tables/index.html b/content/2017/03/30/continuous-queries-on-dynamic-tables/index.html
index 09985c8..17f07c9 100644
--- a/content/2017/03/30/continuous-queries-on-dynamic-tables/index.html
+++ b/content/2017/03/30/continuous-queries-on-dynamic-tables/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2017/04/26/apache-flink-1.2.1-released/index.html b/content/2017/04/26/apache-flink-1.2.1-released/index.html
index d19a33a..b5024db 100644
--- a/content/2017/04/26/apache-flink-1.2.1-released/index.html
+++ b/content/2017/04/26/apache-flink-1.2.1-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2017/05/16/introducing-docker-images-for-apache-flink/index.html b/content/2017/05/16/introducing-docker-images-for-apache-flink/index.html
index b431bd9..384ad7d 100644
--- a/content/2017/05/16/introducing-docker-images-for-apache-flink/index.html
+++ b/content/2017/05/16/introducing-docker-images-for-apache-flink/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2017/06/01/apache-flink-1.3.0-release-announcement/index.html b/content/2017/06/01/apache-flink-1.3.0-release-announcement/index.html
index 86d19f2..62601b1 100644
--- a/content/2017/06/01/apache-flink-1.3.0-release-announcement/index.html
+++ b/content/2017/06/01/apache-flink-1.3.0-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2017/06/23/apache-flink-1.3.1-released/index.html b/content/2017/06/23/apache-flink-1.3.1-released/index.html
index 9bbf98f..210a0e9 100644
--- a/content/2017/06/23/apache-flink-1.3.1-released/index.html
+++ b/content/2017/06/23/apache-flink-1.3.1-released/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2017/07/04/a-deep-dive-into-rescalable-state-in-apache-flink/index.html b/content/2017/07/04/a-deep-dive-into-rescalable-state-in-apache-flink/index.html
index 3f544f3..49ba720 100644
--- a/content/2017/07/04/a-deep-dive-into-rescalable-state-in-apache-flink/index.html
+++ b/content/2017/07/04/a-deep-dive-into-rescalable-state-in-apache-flink/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2017/08/05/apache-flink-1.3.2-released/index.html b/content/2017/08/05/apache-flink-1.3.2-released/index.html
index e5b3e34..75d57ae 100644
--- a/content/2017/08/05/apache-flink-1.3.2-released/index.html
+++ b/content/2017/08/05/apache-flink-1.3.2-released/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2017/11/21/looking-ahead-to-apache-flink-1.4.0-and-1.5.0/index.html b/content/2017/11/21/looking-ahead-to-apache-flink-1.4.0-and-1.5.0/index.html
index 4ab4c79..f1b4a8e 100644
--- a/content/2017/11/21/looking-ahead-to-apache-flink-1.4.0-and-1.5.0/index.html
+++ b/content/2017/11/21/looking-ahead-to-apache-flink-1.4.0-and-1.5.0/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2017/12/12/apache-flink-1.4.0-release-announcement/index.html b/content/2017/12/12/apache-flink-1.4.0-release-announcement/index.html
index c050afb..74d3068 100644
--- a/content/2017/12/12/apache-flink-1.4.0-release-announcement/index.html
+++ b/content/2017/12/12/apache-flink-1.4.0-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2017/12/21/apache-flink-in-2017-year-in-review/index.html b/content/2017/12/21/apache-flink-in-2017-year-in-review/index.html
index 9b88ae1..0f7db40 100644
--- a/content/2017/12/21/apache-flink-in-2017-year-in-review/index.html
+++ b/content/2017/12/21/apache-flink-in-2017-year-in-review/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2018/01/30/managing-large-state-in-apache-flink-an-intro-to-incremental-checkpointing/index.html b/content/2018/01/30/managing-large-state-in-apache-flink-an-intro-to-incremental-checkpointing/index.html
index 4511855..23953cb 100644
--- a/content/2018/01/30/managing-large-state-in-apache-flink-an-intro-to-incremental-checkpointing/index.html
+++ b/content/2018/01/30/managing-large-state-in-apache-flink-an-intro-to-incremental-checkpointing/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2018/02/15/apache-flink-1.4.1-released/index.html b/content/2018/02/15/apache-flink-1.4.1-released/index.html
index 5e981db..ddf9239 100644
--- a/content/2018/02/15/apache-flink-1.4.1-released/index.html
+++ b/content/2018/02/15/apache-flink-1.4.1-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2018/02/28/an-overview-of-end-to-end-exactly-once-processing-in-apache-flink-with-apache-kafka-too/index.html b/content/2018/02/28/an-overview-of-end-to-end-exactly-once-processing-in-apache-flink-with-apache-kafka-too/index.html
index bc6a718..096c064 100644
--- a/content/2018/02/28/an-overview-of-end-to-end-exactly-once-processing-in-apache-flink-with-apache-kafka-too/index.html
+++ b/content/2018/02/28/an-overview-of-end-to-end-exactly-once-processing-in-apache-flink-with-apache-kafka-too/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2018/03/08/apache-flink-1.4.2-released/index.html b/content/2018/03/08/apache-flink-1.4.2-released/index.html
index 38af1c7..a0723b4 100644
--- a/content/2018/03/08/apache-flink-1.4.2-released/index.html
+++ b/content/2018/03/08/apache-flink-1.4.2-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2018/03/15/apache-flink-1.3.3-released/index.html b/content/2018/03/15/apache-flink-1.3.3-released/index.html
index d369c35..4855a63 100644
--- a/content/2018/03/15/apache-flink-1.3.3-released/index.html
+++ b/content/2018/03/15/apache-flink-1.3.3-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2018/05/18/apache-flink-1.5.0-release-announcement/index.html b/content/2018/05/18/apache-flink-1.5.0-release-announcement/index.html
index 3db44ab..98d0273 100644
--- a/content/2018/05/18/apache-flink-1.5.0-release-announcement/index.html
+++ b/content/2018/05/18/apache-flink-1.5.0-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2018/07/12/apache-flink-1.5.1-released/index.html b/content/2018/07/12/apache-flink-1.5.1-released/index.html
index eb101ac..293ac5c 100644
--- a/content/2018/07/12/apache-flink-1.5.1-released/index.html
+++ b/content/2018/07/12/apache-flink-1.5.1-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2018/07/31/apache-flink-1.5.2-released/index.html b/content/2018/07/31/apache-flink-1.5.2-released/index.html
index a901ff5..9926e20 100644
--- a/content/2018/07/31/apache-flink-1.5.2-released/index.html
+++ b/content/2018/07/31/apache-flink-1.5.2-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2018/08/09/apache-flink-1.6.0-release-announcement/index.html b/content/2018/08/09/apache-flink-1.6.0-release-announcement/index.html
index 0e88190..c3bf208 100644
--- a/content/2018/08/09/apache-flink-1.6.0-release-announcement/index.html
+++ b/content/2018/08/09/apache-flink-1.6.0-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2018/08/21/apache-flink-1.5.3-released/index.html b/content/2018/08/21/apache-flink-1.5.3-released/index.html
index 377aa3b..c6c0d71 100644
--- a/content/2018/08/21/apache-flink-1.5.3-released/index.html
+++ b/content/2018/08/21/apache-flink-1.5.3-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2018/09/20/apache-flink-1.5.4-released/index.html b/content/2018/09/20/apache-flink-1.5.4-released/index.html
index c7c6e5c..ec89b98 100644
--- a/content/2018/09/20/apache-flink-1.5.4-released/index.html
+++ b/content/2018/09/20/apache-flink-1.5.4-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2018/09/20/apache-flink-1.6.1-released/index.html b/content/2018/09/20/apache-flink-1.6.1-released/index.html
index d88eb2a..dab8763 100644
--- a/content/2018/09/20/apache-flink-1.6.1-released/index.html
+++ b/content/2018/09/20/apache-flink-1.6.1-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2018/10/29/apache-flink-1.5.5-released/index.html b/content/2018/10/29/apache-flink-1.5.5-released/index.html
index 3db38d8..97731eb 100644
--- a/content/2018/10/29/apache-flink-1.5.5-released/index.html
+++ b/content/2018/10/29/apache-flink-1.5.5-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2018/10/29/apache-flink-1.6.2-released/index.html b/content/2018/10/29/apache-flink-1.6.2-released/index.html
index 9059e73..06e0f8a 100644
--- a/content/2018/10/29/apache-flink-1.6.2-released/index.html
+++ b/content/2018/10/29/apache-flink-1.6.2-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2018/11/30/apache-flink-1.7.0-release-announcement/index.html b/content/2018/11/30/apache-flink-1.7.0-release-announcement/index.html
index 7efab44..fef7db9 100644
--- a/content/2018/11/30/apache-flink-1.7.0-release-announcement/index.html
+++ b/content/2018/11/30/apache-flink-1.7.0-release-announcement/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2018/12/21/apache-flink-1.7.1-released/index.html b/content/2018/12/21/apache-flink-1.7.1-released/index.html
index 15ef604..e420ec8 100644
--- a/content/2018/12/21/apache-flink-1.7.1-released/index.html
+++ b/content/2018/12/21/apache-flink-1.7.1-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2018/12/22/apache-flink-1.6.3-released/index.html b/content/2018/12/22/apache-flink-1.6.3-released/index.html
index 13bb69a..d7dea79 100644
--- a/content/2018/12/22/apache-flink-1.6.3-released/index.html
+++ b/content/2018/12/22/apache-flink-1.6.3-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2018/12/26/apache-flink-1.5.6-released/index.html b/content/2018/12/26/apache-flink-1.5.6-released/index.html
index f88241b..33aae73 100644
--- a/content/2018/12/26/apache-flink-1.5.6-released/index.html
+++ b/content/2018/12/26/apache-flink-1.5.6-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/02/13/batch-as-a-special-case-of-streaming-and-alibabas-contribution-of-blink/index.html b/content/2019/02/13/batch-as-a-special-case-of-streaming-and-alibabas-contribution-of-blink/index.html
index 4aad761..1d9b778 100644
--- a/content/2019/02/13/batch-as-a-special-case-of-streaming-and-alibabas-contribution-of-blink/index.html
+++ b/content/2019/02/13/batch-as-a-special-case-of-streaming-and-alibabas-contribution-of-blink/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/02/15/apache-flink-1.7.2-released/index.html b/content/2019/02/15/apache-flink-1.7.2-released/index.html
index 712880b..6a34346 100644
--- a/content/2019/02/15/apache-flink-1.7.2-released/index.html
+++ b/content/2019/02/15/apache-flink-1.7.2-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/02/21/monitoring-apache-flink-applications-101/index.html b/content/2019/02/21/monitoring-apache-flink-applications-101/index.html
index ba2e9dd..3ad8be1 100644
--- a/content/2019/02/21/monitoring-apache-flink-applications-101/index.html
+++ b/content/2019/02/21/monitoring-apache-flink-applications-101/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/02/25/apache-flink-1.6.4-released/index.html b/content/2019/02/25/apache-flink-1.6.4-released/index.html
index 62c302d..4ab4ec3 100644
--- a/content/2019/02/25/apache-flink-1.6.4-released/index.html
+++ b/content/2019/02/25/apache-flink-1.6.4-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/03/06/what-to-expect-from-flink-forward-san-francisco-2019/index.html b/content/2019/03/06/what-to-expect-from-flink-forward-san-francisco-2019/index.html
index a9e7583..b68a81f 100644
--- a/content/2019/03/06/what-to-expect-from-flink-forward-san-francisco-2019/index.html
+++ b/content/2019/03/06/what-to-expect-from-flink-forward-san-francisco-2019/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/03/11/flink-and-prometheus-cloud-native-monitoring-of-streaming-applications/index.html b/content/2019/03/11/flink-and-prometheus-cloud-native-monitoring-of-streaming-applications/index.html
index 3f02376..be9f14b 100644
--- a/content/2019/03/11/flink-and-prometheus-cloud-native-monitoring-of-streaming-applications/index.html
+++ b/content/2019/03/11/flink-and-prometheus-cloud-native-monitoring-of-streaming-applications/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/04/09/apache-flink-1.8.0-release-announcement/index.html b/content/2019/04/09/apache-flink-1.8.0-release-announcement/index.html
index 0541e0d..db43ae6 100644
--- a/content/2019/04/09/apache-flink-1.8.0-release-announcement/index.html
+++ b/content/2019/04/09/apache-flink-1.8.0-release-announcement/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/04/17/apache-flinks-application-to-season-of-docs/index.html b/content/2019/04/17/apache-flinks-application-to-season-of-docs/index.html
index f2273da..b68f8ae 100644
--- a/content/2019/04/17/apache-flinks-application-to-season-of-docs/index.html
+++ b/content/2019/04/17/apache-flinks-application-to-season-of-docs/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/05/03/when-flink-pulsar-come-together/index.html b/content/2019/05/03/when-flink-pulsar-come-together/index.html
index bdb971b..115935a 100644
--- a/content/2019/05/03/when-flink-pulsar-come-together/index.html
+++ b/content/2019/05/03/when-flink-pulsar-come-together/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/05/14/flux-capacitor-huh-temporal-tables-and-joins-in-streaming-sql/index.html b/content/2019/05/14/flux-capacitor-huh-temporal-tables-and-joins-in-streaming-sql/index.html
index 12b9c0b..a6b2057 100644
--- a/content/2019/05/14/flux-capacitor-huh-temporal-tables-and-joins-in-streaming-sql/index.html
+++ b/content/2019/05/14/flux-capacitor-huh-temporal-tables-and-joins-in-streaming-sql/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/05/17/state-ttl-in-flink-1.8.0-how-to-automatically-cleanup-application-state-in-apache-flink/index.html b/content/2019/05/17/state-ttl-in-flink-1.8.0-how-to-automatically-cleanup-application-state-in-apache-flink/index.html
index ef2dee7..54d2be9 100644
--- a/content/2019/05/17/state-ttl-in-flink-1.8.0-how-to-automatically-cleanup-application-state-in-apache-flink/index.html
+++ b/content/2019/05/17/state-ttl-in-flink-1.8.0-how-to-automatically-cleanup-application-state-in-apache-flink/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/06/05/a-deep-dive-into-flinks-network-stack/index.html b/content/2019/06/05/a-deep-dive-into-flinks-network-stack/index.html
index f90480a..092e3e4 100644
--- a/content/2019/06/05/a-deep-dive-into-flinks-network-stack/index.html
+++ b/content/2019/06/05/a-deep-dive-into-flinks-network-stack/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/06/26/a-practical-guide-to-broadcast-state-in-apache-flink/index.html b/content/2019/06/26/a-practical-guide-to-broadcast-state-in-apache-flink/index.html
index 4ed53d7..3605bbb 100644
--- a/content/2019/06/26/a-practical-guide-to-broadcast-state-in-apache-flink/index.html
+++ b/content/2019/06/26/a-practical-guide-to-broadcast-state-in-apache-flink/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/07/02/apache-flink-1.8.1-released/index.html b/content/2019/07/02/apache-flink-1.8.1-released/index.html
index 85b62fd..c3ee71c 100644
--- a/content/2019/07/02/apache-flink-1.8.1-released/index.html
+++ b/content/2019/07/02/apache-flink-1.8.1-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/07/23/flink-network-stack-vol.-2-monitoring-metrics-and-that-backpressure-thing/index.html b/content/2019/07/23/flink-network-stack-vol.-2-monitoring-metrics-and-that-backpressure-thing/index.html
index 55a1dc4..295b3ca 100644
--- a/content/2019/07/23/flink-network-stack-vol.-2-monitoring-metrics-and-that-backpressure-thing/index.html
+++ b/content/2019/07/23/flink-network-stack-vol.-2-monitoring-metrics-and-that-backpressure-thing/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/08/22/apache-flink-1.9.0-release-announcement/index.html b/content/2019/08/22/apache-flink-1.9.0-release-announcement/index.html
index bf46bf3..c58db27 100644
--- a/content/2019/08/22/apache-flink-1.9.0-release-announcement/index.html
+++ b/content/2019/08/22/apache-flink-1.9.0-release-announcement/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/09/05/flink-community-update-september19/index.html b/content/2019/09/05/flink-community-update-september19/index.html
index 88238a3..eb4f7cb 100644
--- a/content/2019/09/05/flink-community-update-september19/index.html
+++ b/content/2019/09/05/flink-community-update-september19/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/09/11/apache-flink-1.8.2-released/index.html b/content/2019/09/11/apache-flink-1.8.2-released/index.html
index 197a8c2..6afa533 100644
--- a/content/2019/09/11/apache-flink-1.8.2-released/index.html
+++ b/content/2019/09/11/apache-flink-1.8.2-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/09/13/the-state-processor-api-how-to-read-write-and-modify-the-state-of-flink-applications/index.html b/content/2019/09/13/the-state-processor-api-how-to-read-write-and-modify-the-state-of-flink-applications/index.html
index 1020302..4b6066a 100644
--- a/content/2019/09/13/the-state-processor-api-how-to-read-write-and-modify-the-state-of-flink-applications/index.html
+++ b/content/2019/09/13/the-state-processor-api-how-to-read-write-and-modify-the-state-of-flink-applications/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/10/18/apache-flink-1.9.1-released/index.html b/content/2019/10/18/apache-flink-1.9.1-released/index.html
index a7a3c68..10873a7 100644
--- a/content/2019/10/18/apache-flink-1.9.1-released/index.html
+++ b/content/2019/10/18/apache-flink-1.9.1-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/11/06/running-apache-flink-on-kubernetes-with-kudo/index.html b/content/2019/11/06/running-apache-flink-on-kubernetes-with-kudo/index.html
index da20cdc..ac5f15a 100644
--- a/content/2019/11/06/running-apache-flink-on-kubernetes-with-kudo/index.html
+++ b/content/2019/11/06/running-apache-flink-on-kubernetes-with-kudo/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/11/25/how-to-query-pulsar-streams-using-apache-flink/index.html b/content/2019/11/25/how-to-query-pulsar-streams-using-apache-flink/index.html
index c540d5c..f4bfa41 100644
--- a/content/2019/11/25/how-to-query-pulsar-streams-using-apache-flink/index.html
+++ b/content/2019/11/25/how-to-query-pulsar-streams-using-apache-flink/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2019/12/11/apache-flink-1.8.3-released/index.html b/content/2019/12/11/apache-flink-1.8.3-released/index.html
index d544266..9b99b23 100644
--- a/content/2019/12/11/apache-flink-1.8.3-released/index.html
+++ b/content/2019/12/11/apache-flink-1.8.3-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/01/15/advanced-flink-application-patterns-vol.1-case-study-of-a-fraud-detection-system/index.html b/content/2020/01/15/advanced-flink-application-patterns-vol.1-case-study-of-a-fraud-detection-system/index.html
index d1713b9..bbc5025 100644
--- a/content/2020/01/15/advanced-flink-application-patterns-vol.1-case-study-of-a-fraud-detection-system/index.html
+++ b/content/2020/01/15/advanced-flink-application-patterns-vol.1-case-study-of-a-fraud-detection-system/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/01/29/state-unlocked-interacting-with-state-in-apache-flink/index.html b/content/2020/01/29/state-unlocked-interacting-with-state-in-apache-flink/index.html
index c0d0644..9ad9e5e 100644
--- a/content/2020/01/29/state-unlocked-interacting-with-state-in-apache-flink/index.html
+++ b/content/2020/01/29/state-unlocked-interacting-with-state-in-apache-flink/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/01/30/apache-flink-1.9.2-released/index.html b/content/2020/01/30/apache-flink-1.9.2-released/index.html
index f823b3d..575eda8 100644
--- a/content/2020/01/30/apache-flink-1.9.2-released/index.html
+++ b/content/2020/01/30/apache-flink-1.9.2-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/02/03/a-guide-for-unit-testing-in-apache-flink/index.html b/content/2020/02/03/a-guide-for-unit-testing-in-apache-flink/index.html
index cacf04a..e535dbd 100644
--- a/content/2020/02/03/a-guide-for-unit-testing-in-apache-flink/index.html
+++ b/content/2020/02/03/a-guide-for-unit-testing-in-apache-flink/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/02/11/apache-flink-1.10.0-release-announcement/index.html b/content/2020/02/11/apache-flink-1.10.0-release-announcement/index.html
index 50f3026..c9c5888 100644
--- a/content/2020/02/11/apache-flink-1.10.0-release-announcement/index.html
+++ b/content/2020/02/11/apache-flink-1.10.0-release-announcement/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/02/20/no-java-required-configuring-sources-and-sinks-in-sql/index.html b/content/2020/02/20/no-java-required-configuring-sources-and-sinks-in-sql/index.html
index 09f4e9a..2003f07 100644
--- a/content/2020/02/20/no-java-required-configuring-sources-and-sinks-in-sql/index.html
+++ b/content/2020/02/20/no-java-required-configuring-sources-and-sinks-in-sql/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/02/22/apache-beam-how-beam-runs-on-top-of-flink/index.html b/content/2020/02/22/apache-beam-how-beam-runs-on-top-of-flink/index.html
index 1bf403e..2bacd4a 100644
--- a/content/2020/02/22/apache-beam-how-beam-runs-on-top-of-flink/index.html
+++ b/content/2020/02/22/apache-beam-how-beam-runs-on-top-of-flink/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/03/24/advanced-flink-application-patterns-vol.2-dynamic-updates-of-application-logic/index.html b/content/2020/03/24/advanced-flink-application-patterns-vol.2-dynamic-updates-of-application-logic/index.html
index 5eee06b..931475a 100644
--- a/content/2020/03/24/advanced-flink-application-patterns-vol.2-dynamic-updates-of-application-logic/index.html
+++ b/content/2020/03/24/advanced-flink-application-patterns-vol.2-dynamic-updates-of-application-logic/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/03/27/flink-as-unified-engine-for-modern-data-warehousing-production-ready-hive-integration/index.html b/content/2020/03/27/flink-as-unified-engine-for-modern-data-warehousing-production-ready-hive-integration/index.html
index da23938..3b94561 100644
--- a/content/2020/03/27/flink-as-unified-engine-for-modern-data-warehousing-production-ready-hive-integration/index.html
+++ b/content/2020/03/27/flink-as-unified-engine-for-modern-data-warehousing-production-ready-hive-integration/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/03/30/flink-community-update-april20/index.html b/content/2020/03/30/flink-community-update-april20/index.html
index c7b4137..8048732 100644
--- a/content/2020/03/30/flink-community-update-april20/index.html
+++ b/content/2020/03/30/flink-community-update-april20/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/04/07/stateful-functions-2.0-an-event-driven-database-on-apache-flink/index.html b/content/2020/04/07/stateful-functions-2.0-an-event-driven-database-on-apache-flink/index.html
index c44872a..073722f 100644
--- a/content/2020/04/07/stateful-functions-2.0-an-event-driven-database-on-apache-flink/index.html
+++ b/content/2020/04/07/stateful-functions-2.0-an-event-driven-database-on-apache-flink/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/04/09/pyflink-introducing-python-support-for-udfs-in-flinks-table-api/index.html b/content/2020/04/09/pyflink-introducing-python-support-for-udfs-in-flinks-table-api/index.html
index 99499bf..eb3c278 100644
--- a/content/2020/04/09/pyflink-introducing-python-support-for-udfs-in-flinks-table-api/index.html
+++ b/content/2020/04/09/pyflink-introducing-python-support-for-udfs-in-flinks-table-api/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/04/15/flink-serialization-tuning-vol.-1-choosing-your-serializer-if-you-can/index.html b/content/2020/04/15/flink-serialization-tuning-vol.-1-choosing-your-serializer-if-you-can/index.html
index e0b83f4..a6880c7 100644
--- a/content/2020/04/15/flink-serialization-tuning-vol.-1-choosing-your-serializer-if-you-can/index.html
+++ b/content/2020/04/15/flink-serialization-tuning-vol.-1-choosing-your-serializer-if-you-can/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/04/21/memory-management-improvements-with-apache-flink-1.10/index.html b/content/2020/04/21/memory-management-improvements-with-apache-flink-1.10/index.html
index bb2852d..c7c5371 100644
--- a/content/2020/04/21/memory-management-improvements-with-apache-flink-1.10/index.html
+++ b/content/2020/04/21/memory-management-improvements-with-apache-flink-1.10/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/04/24/apache-flink-1.9.3-released/index.html b/content/2020/04/24/apache-flink-1.9.3-released/index.html
index 4068dfd..3191e30 100644
--- a/content/2020/04/24/apache-flink-1.9.3-released/index.html
+++ b/content/2020/04/24/apache-flink-1.9.3-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/05/04/applying-to-google-season-of-docs-2020/index.html b/content/2020/05/04/applying-to-google-season-of-docs-2020/index.html
index 7db2871..7b4814f 100644
--- a/content/2020/05/04/applying-to-google-season-of-docs-2020/index.html
+++ b/content/2020/05/04/applying-to-google-season-of-docs-2020/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/05/06/flink-community-update-may20/index.html b/content/2020/05/06/flink-community-update-may20/index.html
index 449778c..57546f1 100644
--- a/content/2020/05/06/flink-community-update-may20/index.html
+++ b/content/2020/05/06/flink-community-update-may20/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/05/12/apache-flink-1.10.1-released/index.html b/content/2020/05/12/apache-flink-1.10.1-released/index.html
index 0a01f41..e409a3a 100644
--- a/content/2020/05/12/apache-flink-1.10.1-released/index.html
+++ b/content/2020/05/12/apache-flink-1.10.1-released/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/06/09/stateful-functions-2.1.0-release-announcement/index.html b/content/2020/06/09/stateful-functions-2.1.0-release-announcement/index.html
index e312a30..bb85af4 100644
--- a/content/2020/06/09/stateful-functions-2.1.0-release-announcement/index.html
+++ b/content/2020/06/09/stateful-functions-2.1.0-release-announcement/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/06/10/flink-community-update-june20/index.html b/content/2020/06/10/flink-community-update-june20/index.html
index b9795a5..0fac7c6 100644
--- a/content/2020/06/10/flink-community-update-june20/index.html
+++ b/content/2020/06/10/flink-community-update-june20/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/06/15/flink-on-zeppelin-notebooks-for-interactive-data-analysis-part-1/index.html b/content/2020/06/15/flink-on-zeppelin-notebooks-for-interactive-data-analysis-part-1/index.html
index 64c7475..f818c2c 100644
--- a/content/2020/06/15/flink-on-zeppelin-notebooks-for-interactive-data-analysis-part-1/index.html
+++ b/content/2020/06/15/flink-on-zeppelin-notebooks-for-interactive-data-analysis-part-1/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/06/23/flink-on-zeppelin-notebooks-for-interactive-data-analysis-part-2/index.html b/content/2020/06/23/flink-on-zeppelin-notebooks-for-interactive-data-analysis-part-2/index.html
index 0e0ef15..90cf532 100644
--- a/content/2020/06/23/flink-on-zeppelin-notebooks-for-interactive-data-analysis-part-2/index.html
+++ b/content/2020/06/23/flink-on-zeppelin-notebooks-for-interactive-data-analysis-part-2/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/07/06/apache-flink-1.11.0-release-announcement/index.html b/content/2020/07/06/apache-flink-1.11.0-release-announcement/index.html
index 8145a24..335ed21 100644
--- a/content/2020/07/06/apache-flink-1.11.0-release-announcement/index.html
+++ b/content/2020/07/06/apache-flink-1.11.0-release-announcement/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/07/14/application-deployment-in-flink-current-state-and-the-new-application-mode/index.html b/content/2020/07/14/application-deployment-in-flink-current-state-and-the-new-application-mode/index.html
index 084a7ea..8a5f9ca 100644
--- a/content/2020/07/14/application-deployment-in-flink-current-state-and-the-new-application-mode/index.html
+++ b/content/2020/07/14/application-deployment-in-flink-current-state-and-the-new-application-mode/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/07/21/apache-flink-1.11.1-released/index.html b/content/2020/07/21/apache-flink-1.11.1-released/index.html
index 5a1e9cc..0ef7a9d 100644
--- a/content/2020/07/21/apache-flink-1.11.1-released/index.html
+++ b/content/2020/07/21/apache-flink-1.11.1-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/07/23/sharing-is-caring-catalogs-in-flink-sql/index.html b/content/2020/07/23/sharing-is-caring-catalogs-in-flink-sql/index.html
index 1ae9527..9a31f65 100644
--- a/content/2020/07/23/sharing-is-caring-catalogs-in-flink-sql/index.html
+++ b/content/2020/07/23/sharing-is-caring-catalogs-in-flink-sql/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/07/28/flink-sql-demo-building-an-end-to-end-streaming-application/index.html b/content/2020/07/28/flink-sql-demo-building-an-end-to-end-streaming-application/index.html
index a3a291f..001d6e6 100644
--- a/content/2020/07/28/flink-sql-demo-building-an-end-to-end-streaming-application/index.html
+++ b/content/2020/07/28/flink-sql-demo-building-an-end-to-end-streaming-application/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/07/29/flink-community-update-july20/index.html b/content/2020/07/29/flink-community-update-july20/index.html
index f0740be..6cf311f 100644
--- a/content/2020/07/29/flink-community-update-july20/index.html
+++ b/content/2020/07/29/flink-community-update-july20/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/07/30/advanced-flink-application-patterns-vol.3-custom-window-processing/index.html b/content/2020/07/30/advanced-flink-application-patterns-vol.3-custom-window-processing/index.html
index 41714b5..790adc6 100644
--- a/content/2020/07/30/advanced-flink-application-patterns-vol.3-custom-window-processing/index.html
+++ b/content/2020/07/30/advanced-flink-application-patterns-vol.3-custom-window-processing/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/08/04/pyflink-the-integration-of-pandas-into-pyflink/index.html b/content/2020/08/04/pyflink-the-integration-of-pandas-into-pyflink/index.html
index be07fc7..dd77ba9 100644
--- a/content/2020/08/04/pyflink-the-integration-of-pandas-into-pyflink/index.html
+++ b/content/2020/08/04/pyflink-the-integration-of-pandas-into-pyflink/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/08/06/accelerating-your-workload-with-gpu-and-other-external-resources/index.html b/content/2020/08/06/accelerating-your-workload-with-gpu-and-other-external-resources/index.html
index 5de88aa..385cb1e 100644
--- a/content/2020/08/06/accelerating-your-workload-with-gpu-and-other-external-resources/index.html
+++ b/content/2020/08/06/accelerating-your-workload-with-gpu-and-other-external-resources/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/08/18/monitoring-and-controlling-networks-of-iot-devices-with-flink-stateful-functions/index.html b/content/2020/08/18/monitoring-and-controlling-networks-of-iot-devices-with-flink-stateful-functions/index.html
index b6e1691..c7e63bf 100644
--- a/content/2020/08/18/monitoring-and-controlling-networks-of-iot-devices-with-flink-stateful-functions/index.html
+++ b/content/2020/08/18/monitoring-and-controlling-networks-of-iot-devices-with-flink-stateful-functions/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/08/20/the-state-of-flink-on-docker/index.html b/content/2020/08/20/the-state-of-flink-on-docker/index.html
index 6f8241e..9c0b037 100644
--- a/content/2020/08/20/the-state-of-flink-on-docker/index.html
+++ b/content/2020/08/20/the-state-of-flink-on-docker/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/08/25/apache-flink-1.10.2-released/index.html b/content/2020/08/25/apache-flink-1.10.2-released/index.html
index 4b784d2..845ac6f 100644
--- a/content/2020/08/25/apache-flink-1.10.2-released/index.html
+++ b/content/2020/08/25/apache-flink-1.10.2-released/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/09/01/memory-management-improvements-for-flinks-jobmanager-in-apache-flink-1.11/index.html b/content/2020/09/01/memory-management-improvements-for-flinks-jobmanager-in-apache-flink-1.11/index.html
index fcde99f..fcd9f11 100644
--- a/content/2020/09/01/memory-management-improvements-for-flinks-jobmanager-in-apache-flink-1.11/index.html
+++ b/content/2020/09/01/memory-management-improvements-for-flinks-jobmanager-in-apache-flink-1.11/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/09/04/flink-community-update-august20/index.html b/content/2020/09/04/flink-community-update-august20/index.html
index c9d3e80..15a775f 100644
--- a/content/2020/09/04/flink-community-update-august20/index.html
+++ b/content/2020/09/04/flink-community-update-august20/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/09/17/apache-flink-1.11.2-released/index.html b/content/2020/09/17/apache-flink-1.11.2-released/index.html
index 710997a..97424d5 100644
--- a/content/2020/09/17/apache-flink-1.11.2-released/index.html
+++ b/content/2020/09/17/apache-flink-1.11.2-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/09/28/stateful-functions-2.2.0-release-announcement/index.html b/content/2020/09/28/stateful-functions-2.2.0-release-announcement/index.html
index 98c32a2..39b9226 100644
--- a/content/2020/09/28/stateful-functions-2.2.0-release-announcement/index.html
+++ b/content/2020/09/28/stateful-functions-2.2.0-release-announcement/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/10/13/stateful-functions-internals-behind-the-scenes-of-stateful-serverless/index.html b/content/2020/10/13/stateful-functions-internals-behind-the-scenes-of-stateful-serverless/index.html
index a71c828..4f4df06 100644
--- a/content/2020/10/13/stateful-functions-internals-behind-the-scenes-of-stateful-serverless/index.html
+++ b/content/2020/10/13/stateful-functions-internals-behind-the-scenes-of-stateful-serverless/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/10/15/from-aligned-to-unaligned-checkpoints-part-1-checkpoints-alignment-and-backpressure/index.html b/content/2020/10/15/from-aligned-to-unaligned-checkpoints-part-1-checkpoints-alignment-and-backpressure/index.html
index c27c31e..fa65467 100644
--- a/content/2020/10/15/from-aligned-to-unaligned-checkpoints-part-1-checkpoints-alignment-and-backpressure/index.html
+++ b/content/2020/10/15/from-aligned-to-unaligned-checkpoints-part-1-checkpoints-alignment-and-backpressure/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/11/11/stateful-functions-2.2.1-release-announcement/index.html b/content/2020/11/11/stateful-functions-2.2.1-release-announcement/index.html
index 708a832..c195b6b 100644
--- a/content/2020/11/11/stateful-functions-2.2.1-release-announcement/index.html
+++ b/content/2020/11/11/stateful-functions-2.2.1-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/12/02/improvements-in-task-scheduling-for-batch-workloads-in-apache-flink-1.12/index.html b/content/2020/12/02/improvements-in-task-scheduling-for-batch-workloads-in-apache-flink-1.12/index.html
index fbb93a6..03572b8 100644
--- a/content/2020/12/02/improvements-in-task-scheduling-for-batch-workloads-in-apache-flink-1.12/index.html
+++ b/content/2020/12/02/improvements-in-task-scheduling-for-batch-workloads-in-apache-flink-1.12/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/12/10/apache-flink-1.12.0-release-announcement/index.html b/content/2020/12/10/apache-flink-1.12.0-release-announcement/index.html
index 9e3d2d6..1ac2af4 100644
--- a/content/2020/12/10/apache-flink-1.12.0-release-announcement/index.html
+++ b/content/2020/12/10/apache-flink-1.12.0-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2020/12/18/apache-flink-1.11.3-released/index.html b/content/2020/12/18/apache-flink-1.11.3-released/index.html
index 6ffdb9b..2d85239 100644
--- a/content/2020/12/18/apache-flink-1.11.3-released/index.html
+++ b/content/2020/12/18/apache-flink-1.11.3-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/01/02/stateful-functions-2.2.2-release-announcement/index.html b/content/2021/01/02/stateful-functions-2.2.2-release-announcement/index.html
index d26fb9a..2f37e9b 100644
--- a/content/2021/01/02/stateful-functions-2.2.2-release-announcement/index.html
+++ b/content/2021/01/02/stateful-functions-2.2.2-release-announcement/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/01/07/whats-new-in-the-pulsar-flink-connector-2.7.0/index.html b/content/2021/01/07/whats-new-in-the-pulsar-flink-connector-2.7.0/index.html
index 6a35745..ecd8432 100644
--- a/content/2021/01/07/whats-new-in-the-pulsar-flink-connector-2.7.0/index.html
+++ b/content/2021/01/07/whats-new-in-the-pulsar-flink-connector-2.7.0/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/01/11/exploring-fine-grained-recovery-of-bounded-data-sets-on-flink/index.html b/content/2021/01/11/exploring-fine-grained-recovery-of-bounded-data-sets-on-flink/index.html
index af5fabf..057c6a2 100644
--- a/content/2021/01/11/exploring-fine-grained-recovery-of-bounded-data-sets-on-flink/index.html
+++ b/content/2021/01/11/exploring-fine-grained-recovery-of-bounded-data-sets-on-flink/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/01/18/using-rocksdb-state-backend-in-apache-flink-when-and-how/index.html b/content/2021/01/18/using-rocksdb-state-backend-in-apache-flink-when-and-how/index.html
index 023e44b..55a0c45 100644
--- a/content/2021/01/18/using-rocksdb-state-backend-in-apache-flink-when-and-how/index.html
+++ b/content/2021/01/18/using-rocksdb-state-backend-in-apache-flink-when-and-how/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/01/19/apache-flink-1.12.1-released/index.html b/content/2021/01/19/apache-flink-1.12.1-released/index.html
index 9d5c69e..fb56041 100644
--- a/content/2021/01/19/apache-flink-1.12.1-released/index.html
+++ b/content/2021/01/19/apache-flink-1.12.1-released/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/01/29/apache-flink-1.10.3-released/index.html b/content/2021/01/29/apache-flink-1.10.3-released/index.html
index 665b803..0b0c5d5 100644
--- a/content/2021/01/29/apache-flink-1.10.3-released/index.html
+++ b/content/2021/01/29/apache-flink-1.10.3-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/02/10/how-to-natively-deploy-flink-on-kubernetes-with-high-availability-ha/index.html b/content/2021/02/10/how-to-natively-deploy-flink-on-kubernetes-with-high-availability-ha/index.html
index 6e3c934..d07b339 100644
--- a/content/2021/02/10/how-to-natively-deploy-flink-on-kubernetes-with-high-availability-ha/index.html
+++ b/content/2021/02/10/how-to-natively-deploy-flink-on-kubernetes-with-high-availability-ha/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/03/03/apache-flink-1.12.2-released/index.html b/content/2021/03/03/apache-flink-1.12.2-released/index.html
index cde9f87..2d13dbe 100644
--- a/content/2021/03/03/apache-flink-1.12.2-released/index.html
+++ b/content/2021/03/03/apache-flink-1.12.2-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/03/11/a-rundown-of-batch-execution-mode-in-the-datastream-api/index.html b/content/2021/03/11/a-rundown-of-batch-execution-mode-in-the-datastream-api/index.html
index 13fbfa0..e9957ba 100644
--- a/content/2021/03/11/a-rundown-of-batch-execution-mode-in-the-datastream-api/index.html
+++ b/content/2021/03/11/a-rundown-of-batch-execution-mode-in-the-datastream-api/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/04/15/stateful-functions-3.0.0-remote-functions-front-and-center/index.html b/content/2021/04/15/stateful-functions-3.0.0-remote-functions-front-and-center/index.html
index fa63639..b8cddfd 100644
--- a/content/2021/04/15/stateful-functions-3.0.0-remote-functions-front-and-center/index.html
+++ b/content/2021/04/15/stateful-functions-3.0.0-remote-functions-front-and-center/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/04/29/apache-flink-1.12.3-released/index.html b/content/2021/04/29/apache-flink-1.12.3-released/index.html
index 8a9ebd6..dee5ef5 100644
--- a/content/2021/04/29/apache-flink-1.12.3-released/index.html
+++ b/content/2021/04/29/apache-flink-1.12.3-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/05/03/apache-flink-1.13.0-release-announcement/index.html b/content/2021/05/03/apache-flink-1.13.0-release-announcement/index.html
index 34a6981..0d13cef 100644
--- a/content/2021/05/03/apache-flink-1.13.0-release-announcement/index.html
+++ b/content/2021/05/03/apache-flink-1.13.0-release-announcement/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/05/06/scaling-flink-automatically-with-reactive-mode/index.html b/content/2021/05/06/scaling-flink-automatically-with-reactive-mode/index.html
index 50f992c..a1cca54 100644
--- a/content/2021/05/06/scaling-flink-automatically-with-reactive-mode/index.html
+++ b/content/2021/05/06/scaling-flink-automatically-with-reactive-mode/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/05/21/apache-flink-1.12.4-released/index.html b/content/2021/05/21/apache-flink-1.12.4-released/index.html
index de5a257..b8cdc75 100644
--- a/content/2021/05/21/apache-flink-1.12.4-released/index.html
+++ b/content/2021/05/21/apache-flink-1.12.4-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/05/28/apache-flink-1.13.1-released/index.html b/content/2021/05/28/apache-flink-1.13.1-released/index.html
index 13df78b..16ef79e 100644
--- a/content/2021/05/28/apache-flink-1.13.1-released/index.html
+++ b/content/2021/05/28/apache-flink-1.13.1-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/07/07/how-to-identify-the-source-of-backpressure/index.html b/content/2021/07/07/how-to-identify-the-source-of-backpressure/index.html
index 5713da6..2bc1845 100644
--- a/content/2021/07/07/how-to-identify-the-source-of-backpressure/index.html
+++ b/content/2021/07/07/how-to-identify-the-source-of-backpressure/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/08/06/apache-flink-1.12.5-released/index.html b/content/2021/08/06/apache-flink-1.12.5-released/index.html
index ffebeb6..cbef3d3 100644
--- a/content/2021/08/06/apache-flink-1.12.5-released/index.html
+++ b/content/2021/08/06/apache-flink-1.12.5-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/08/06/apache-flink-1.13.2-released/index.html b/content/2021/08/06/apache-flink-1.13.2-released/index.html
index 8a0e47f..e886d1a 100644
--- a/content/2021/08/06/apache-flink-1.13.2-released/index.html
+++ b/content/2021/08/06/apache-flink-1.13.2-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/08/09/apache-flink-1.11.4-released/index.html b/content/2021/08/09/apache-flink-1.11.4-released/index.html
index 2fbb7a1..10817d6 100644
--- a/content/2021/08/09/apache-flink-1.11.4-released/index.html
+++ b/content/2021/08/09/apache-flink-1.11.4-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/08/31/help-us-stabilize-apache-flink-1.14.0-rc0/index.html b/content/2021/08/31/help-us-stabilize-apache-flink-1.14.0-rc0/index.html
index ae9b228..08acfbb 100644
--- a/content/2021/08/31/help-us-stabilize-apache-flink-1.14.0-rc0/index.html
+++ b/content/2021/08/31/help-us-stabilize-apache-flink-1.14.0-rc0/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/08/31/stateful-functions-3.1.0-release-announcement/index.html b/content/2021/08/31/stateful-functions-3.1.0-release-announcement/index.html
index e00e616..1f12379 100644
--- a/content/2021/08/31/stateful-functions-3.1.0-release-announcement/index.html
+++ b/content/2021/08/31/stateful-functions-3.1.0-release-announcement/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/09/07/implementing-a-custom-source-connector-for-table-api-and-sql-part-one/index.html b/content/2021/09/07/implementing-a-custom-source-connector-for-table-api-and-sql-part-one/index.html
index b4b40f0..61c3f16 100644
--- a/content/2021/09/07/implementing-a-custom-source-connector-for-table-api-and-sql-part-one/index.html
+++ b/content/2021/09/07/implementing-a-custom-source-connector-for-table-api-and-sql-part-one/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/09/07/implementing-a-custom-source-connector-for-table-api-and-sql-part-two/index.html b/content/2021/09/07/implementing-a-custom-source-connector-for-table-api-and-sql-part-two/index.html
index 499ed60..d807658 100644
--- a/content/2021/09/07/implementing-a-custom-source-connector-for-table-api-and-sql-part-two/index.html
+++ b/content/2021/09/07/implementing-a-custom-source-connector-for-table-api-and-sql-part-two/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/09/29/apache-flink-1.14.0-release-announcement/index.html b/content/2021/09/29/apache-flink-1.14.0-release-announcement/index.html
index 8b0157f..86064c3 100644
--- a/content/2021/09/29/apache-flink-1.14.0-release-announcement/index.html
+++ b/content/2021/09/29/apache-flink-1.14.0-release-announcement/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/10/19/apache-flink-1.13.3-released/index.html b/content/2021/10/19/apache-flink-1.13.3-released/index.html
index d8d1abb..ad9e2ac 100644
--- a/content/2021/10/19/apache-flink-1.13.3-released/index.html
+++ b/content/2021/10/19/apache-flink-1.13.3-released/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/10/26/sort-based-blocking-shuffle-implementation-in-flink-part-one/index.html b/content/2021/10/26/sort-based-blocking-shuffle-implementation-in-flink-part-one/index.html
index 86ff2ce..c2f8038 100644
--- a/content/2021/10/26/sort-based-blocking-shuffle-implementation-in-flink-part-one/index.html
+++ b/content/2021/10/26/sort-based-blocking-shuffle-implementation-in-flink-part-one/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/10/26/sort-based-blocking-shuffle-implementation-in-flink-part-two/index.html b/content/2021/10/26/sort-based-blocking-shuffle-implementation-in-flink-part-two/index.html
index 375325a..631f7c0 100644
--- a/content/2021/10/26/sort-based-blocking-shuffle-implementation-in-flink-part-two/index.html
+++ b/content/2021/10/26/sort-based-blocking-shuffle-implementation-in-flink-part-two/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/11/03/flink-backward-the-apache-flink-retrospective/index.html b/content/2021/11/03/flink-backward-the-apache-flink-retrospective/index.html
index b5e6a3b..61a31cc 100644
--- a/content/2021/11/03/flink-backward-the-apache-flink-retrospective/index.html
+++ b/content/2021/11/03/flink-backward-the-apache-flink-retrospective/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/12/10/advise-on-apache-log4j-zero-day-cve-2021-44228/index.html b/content/2021/12/10/advise-on-apache-log4j-zero-day-cve-2021-44228/index.html
index 44b57e5..32c00b1 100644
--- a/content/2021/12/10/advise-on-apache-log4j-zero-day-cve-2021-44228/index.html
+++ b/content/2021/12/10/advise-on-apache-log4j-zero-day-cve-2021-44228/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/12/16/apache-flink-log4j-emergency-releases/index.html b/content/2021/12/16/apache-flink-log4j-emergency-releases/index.html
index 5357fc5..1b15f69 100644
--- a/content/2021/12/16/apache-flink-log4j-emergency-releases/index.html
+++ b/content/2021/12/16/apache-flink-log4j-emergency-releases/index.html
@@ -34,7 +34,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2021/12/22/apache-flink-statefun-log4j-emergency-release/index.html b/content/2021/12/22/apache-flink-statefun-log4j-emergency-release/index.html
index edc72d0..cff4f9e 100644
--- a/content/2021/12/22/apache-flink-statefun-log4j-emergency-release/index.html
+++ b/content/2021/12/22/apache-flink-statefun-log4j-emergency-release/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/01/04/how-we-improved-scheduler-performance-for-large-scale-jobs-part-one/index.html b/content/2022/01/04/how-we-improved-scheduler-performance-for-large-scale-jobs-part-one/index.html
index 70340fd..f195d5e 100644
--- a/content/2022/01/04/how-we-improved-scheduler-performance-for-large-scale-jobs-part-one/index.html
+++ b/content/2022/01/04/how-we-improved-scheduler-performance-for-large-scale-jobs-part-one/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/01/04/how-we-improved-scheduler-performance-for-large-scale-jobs-part-two/index.html b/content/2022/01/04/how-we-improved-scheduler-performance-for-large-scale-jobs-part-two/index.html
index 4c39a77..b97a857 100644
--- a/content/2022/01/04/how-we-improved-scheduler-performance-for-large-scale-jobs-part-two/index.html
+++ b/content/2022/01/04/how-we-improved-scheduler-performance-for-large-scale-jobs-part-two/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/01/07/apache-flink-ml-2.0.0-release-announcement/index.html b/content/2022/01/07/apache-flink-ml-2.0.0-release-announcement/index.html
index dc9a6a8..670d5fc 100644
--- a/content/2022/01/07/apache-flink-ml-2.0.0-release-announcement/index.html
+++ b/content/2022/01/07/apache-flink-ml-2.0.0-release-announcement/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/01/17/apache-flink-1.14.3-release-announcement/index.html b/content/2022/01/17/apache-flink-1.14.3-release-announcement/index.html
index ca1d600..77c702e 100644
--- a/content/2022/01/17/apache-flink-1.14.3-release-announcement/index.html
+++ b/content/2022/01/17/apache-flink-1.14.3-release-announcement/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/01/20/pravega-flink-connector-101/index.html b/content/2022/01/20/pravega-flink-connector-101/index.html
index 90ba4d6..bc3e782 100644
--- a/content/2022/01/20/pravega-flink-connector-101/index.html
+++ b/content/2022/01/20/pravega-flink-connector-101/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/01/31/stateful-functions-3.2.0-release-announcement/index.html b/content/2022/01/31/stateful-functions-3.2.0-release-announcement/index.html
index 4469197..c1018ad 100644
--- a/content/2022/01/31/stateful-functions-3.2.0-release-announcement/index.html
+++ b/content/2022/01/31/stateful-functions-3.2.0-release-announcement/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/02/18/apache-flink-1.13.6-release-announcement/index.html b/content/2022/02/18/apache-flink-1.13.6-release-announcement/index.html
index 0e96d2e..4241cc2 100644
--- a/content/2022/02/18/apache-flink-1.13.6-release-announcement/index.html
+++ b/content/2022/02/18/apache-flink-1.13.6-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/02/22/scala-free-in-one-fifteen/index.html b/content/2022/02/22/scala-free-in-one-fifteen/index.html
index a3d71eb..928f53d 100644
--- a/content/2022/02/22/scala-free-in-one-fifteen/index.html
+++ b/content/2022/02/22/scala-free-in-one-fifteen/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/03/11/apache-flink-1.14.4-release-announcement/index.html b/content/2022/03/11/apache-flink-1.14.4-release-announcement/index.html
index 174a02e..10145a7 100644
--- a/content/2022/03/11/apache-flink-1.14.4-release-announcement/index.html
+++ b/content/2022/03/11/apache-flink-1.14.4-release-announcement/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/03/16/the-generic-asynchronous-base-sink/index.html b/content/2022/03/16/the-generic-asynchronous-base-sink/index.html
index 9912fbf..45f7869 100644
--- a/content/2022/03/16/the-generic-asynchronous-base-sink/index.html
+++ b/content/2022/03/16/the-generic-asynchronous-base-sink/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/04/03/apache-flink-kubernetes-operator-0.1.0-release-announcement/index.html b/content/2022/04/03/apache-flink-kubernetes-operator-0.1.0-release-announcement/index.html
index 69aade6..b7c5be4 100644
--- a/content/2022/04/03/apache-flink-kubernetes-operator-0.1.0-release-announcement/index.html
+++ b/content/2022/04/03/apache-flink-kubernetes-operator-0.1.0-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/05/05/announcing-the-release-of-apache-flink-1.15/index.html b/content/2022/05/05/announcing-the-release-of-apache-flink-1.15/index.html
index 082ccf2..6a6e1eb 100644
--- a/content/2022/05/05/announcing-the-release-of-apache-flink-1.15/index.html
+++ b/content/2022/05/05/announcing-the-release-of-apache-flink-1.15/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/05/06/exploring-the-thread-mode-in-pyflink/index.html b/content/2022/05/06/exploring-the-thread-mode-in-pyflink/index.html
index 2ca81d0..ba9c8da 100644
--- a/content/2022/05/06/exploring-the-thread-mode-in-pyflink/index.html
+++ b/content/2022/05/06/exploring-the-thread-mode-in-pyflink/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/05/06/improvements-to-flink-operations-snapshots-ownership-and-savepoint-formats/index.html b/content/2022/05/06/improvements-to-flink-operations-snapshots-ownership-and-savepoint-formats/index.html
index f30822f..4e32b42 100644
--- a/content/2022/05/06/improvements-to-flink-operations-snapshots-ownership-and-savepoint-formats/index.html
+++ b/content/2022/05/06/improvements-to-flink-operations-snapshots-ownership-and-savepoint-formats/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/05/11/apache-flink-table-store-0.1.0-release-announcement/index.html b/content/2022/05/11/apache-flink-table-store-0.1.0-release-announcement/index.html
index d53878a..51dbb80 100644
--- a/content/2022/05/11/apache-flink-table-store-0.1.0-release-announcement/index.html
+++ b/content/2022/05/11/apache-flink-table-store-0.1.0-release-announcement/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/05/18/getting-into-low-latency-gears-with-apache-flink-part-one/index.html b/content/2022/05/18/getting-into-low-latency-gears-with-apache-flink-part-one/index.html
index 6389232..89e2f0a 100644
--- a/content/2022/05/18/getting-into-low-latency-gears-with-apache-flink-part-one/index.html
+++ b/content/2022/05/18/getting-into-low-latency-gears-with-apache-flink-part-one/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/05/23/getting-into-low-latency-gears-with-apache-flink-part-two/index.html b/content/2022/05/23/getting-into-low-latency-gears-with-apache-flink-part-two/index.html
index 890721d..117243a 100644
--- a/content/2022/05/23/getting-into-low-latency-gears-with-apache-flink-part-two/index.html
+++ b/content/2022/05/23/getting-into-low-latency-gears-with-apache-flink-part-two/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/05/30/improving-speed-and-stability-of-checkpointing-with-generic-log-based-incremental-checkpoints/index.html b/content/2022/05/30/improving-speed-and-stability-of-checkpointing-with-generic-log-based-incremental-checkpoints/index.html
index 761dff7..2c4f8c3 100644
--- a/content/2022/05/30/improving-speed-and-stability-of-checkpointing-with-generic-log-based-incremental-checkpoints/index.html
+++ b/content/2022/05/30/improving-speed-and-stability-of-checkpointing-with-generic-log-based-incremental-checkpoints/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/06/05/apache-flink-kubernetes-operator-1.0.0-release-announcement/index.html b/content/2022/06/05/apache-flink-kubernetes-operator-1.0.0-release-announcement/index.html
index 18f8b5c..1aed51d 100644
--- a/content/2022/06/05/apache-flink-kubernetes-operator-1.0.0-release-announcement/index.html
+++ b/content/2022/06/05/apache-flink-kubernetes-operator-1.0.0-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/06/17/adaptive-batch-scheduler-automatically-decide-parallelism-of-flink-batch-jobs/index.html b/content/2022/06/17/adaptive-batch-scheduler-automatically-decide-parallelism-of-flink-batch-jobs/index.html
index ded93b3..65d2241 100644
--- a/content/2022/06/17/adaptive-batch-scheduler-automatically-decide-parallelism-of-flink-batch-jobs/index.html
+++ b/content/2022/06/17/adaptive-batch-scheduler-automatically-decide-parallelism-of-flink-batch-jobs/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/06/22/apache-flink-1.14.5-release-announcement/index.html b/content/2022/06/22/apache-flink-1.14.5-release-announcement/index.html
index 2b99fcc..086cbb4 100644
--- a/content/2022/06/22/apache-flink-1.14.5-release-announcement/index.html
+++ b/content/2022/06/22/apache-flink-1.14.5-release-announcement/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/07/06/apache-flink-1.15.1-release-announcement/index.html b/content/2022/07/06/apache-flink-1.15.1-release-announcement/index.html
index 3749bf8..c547054 100644
--- a/content/2022/07/06/apache-flink-1.15.1-release-announcement/index.html
+++ b/content/2022/07/06/apache-flink-1.15.1-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/07/11/flip-147-support-checkpoints-after-tasks-finished-part-one/index.html b/content/2022/07/11/flip-147-support-checkpoints-after-tasks-finished-part-one/index.html
index e6cfdb1..847f874 100644
--- a/content/2022/07/11/flip-147-support-checkpoints-after-tasks-finished-part-one/index.html
+++ b/content/2022/07/11/flip-147-support-checkpoints-after-tasks-finished-part-one/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/07/11/flip-147-support-checkpoints-after-tasks-finished-part-two/index.html b/content/2022/07/11/flip-147-support-checkpoints-after-tasks-finished-part-two/index.html
index e21e2ab..63fb8f9 100644
--- a/content/2022/07/11/flip-147-support-checkpoints-after-tasks-finished-part-two/index.html
+++ b/content/2022/07/11/flip-147-support-checkpoints-after-tasks-finished-part-two/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/07/12/apache-flink-ml-2.1.0-release-announcement/index.html b/content/2022/07/12/apache-flink-ml-2.1.0-release-announcement/index.html
index 89a972b..ef852cb 100644
--- a/content/2022/07/12/apache-flink-ml-2.1.0-release-announcement/index.html
+++ b/content/2022/07/12/apache-flink-ml-2.1.0-release-announcement/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/07/25/apache-flink-kubernetes-operator-1.1.0-release-announcement/index.html b/content/2022/07/25/apache-flink-kubernetes-operator-1.1.0-release-announcement/index.html
index 387e1eb..c518c95 100644
--- a/content/2022/07/25/apache-flink-kubernetes-operator-1.1.0-release-announcement/index.html
+++ b/content/2022/07/25/apache-flink-kubernetes-operator-1.1.0-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/08/24/apache-flink-1.15.2-release-announcement/index.html b/content/2022/08/24/apache-flink-1.15.2-release-announcement/index.html
index 16cbf5e..a344a05 100644
--- a/content/2022/08/24/apache-flink-1.15.2-release-announcement/index.html
+++ b/content/2022/08/24/apache-flink-1.15.2-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/08/29/apache-flink-table-store-0.2.0-release-announcement/index.html b/content/2022/08/29/apache-flink-table-store-0.2.0-release-announcement/index.html
index d92d9d0..d7540ab 100644
--- a/content/2022/08/29/apache-flink-table-store-0.2.0-release-announcement/index.html
+++ b/content/2022/08/29/apache-flink-table-store-0.2.0-release-announcement/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/09/08/regarding-akkas-licensing-change/index.html b/content/2022/09/08/regarding-akkas-licensing-change/index.html
index 1aed0b1..15fa27a 100644
--- a/content/2022/09/08/regarding-akkas-licensing-change/index.html
+++ b/content/2022/09/08/regarding-akkas-licensing-change/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/09/28/apache-flink-1.14.6-release-announcement/index.html b/content/2022/09/28/apache-flink-1.14.6-release-announcement/index.html
index 682e1a5..67e8c0f 100644
--- a/content/2022/09/28/apache-flink-1.14.6-release-announcement/index.html
+++ b/content/2022/09/28/apache-flink-1.14.6-release-announcement/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/10/07/apache-flink-kubernetes-operator-1.2.0-release-announcement/index.html b/content/2022/10/07/apache-flink-kubernetes-operator-1.2.0-release-announcement/index.html
index a162486..7748c7c 100644
--- a/content/2022/10/07/apache-flink-kubernetes-operator-1.2.0-release-announcement/index.html
+++ b/content/2022/10/07/apache-flink-kubernetes-operator-1.2.0-release-announcement/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/10/13/apache-flink-table-store-0.2.1-release-announcement/index.html b/content/2022/10/13/apache-flink-table-store-0.2.1-release-announcement/index.html
index f8a0302..b205923 100644
--- a/content/2022/10/13/apache-flink-table-store-0.2.1-release-announcement/index.html
+++ b/content/2022/10/13/apache-flink-table-store-0.2.1-release-announcement/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/10/28/announcing-the-release-of-apache-flink-1.16/index.html b/content/2022/10/28/announcing-the-release-of-apache-flink-1.16/index.html
index afe540b..cb669e7 100644
--- a/content/2022/10/28/announcing-the-release-of-apache-flink-1.16/index.html
+++ b/content/2022/10/28/announcing-the-release-of-apache-flink-1.16/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/11/10/apache-flink-1.15.3-release-announcement/index.html b/content/2022/11/10/apache-flink-1.15.3-release-announcement/index.html
index 200677f..610dcdc 100644
--- a/content/2022/11/10/apache-flink-1.15.3-release-announcement/index.html
+++ b/content/2022/11/10/apache-flink-1.15.3-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/11/25/optimising-the-throughput-of-async-sinks-using-a-custom-ratelimitingstrategy/index.html b/content/2022/11/25/optimising-the-throughput-of-async-sinks-using-a-custom-ratelimitingstrategy/index.html
index a5f8649..998bad9 100644
--- a/content/2022/11/25/optimising-the-throughput-of-async-sinks-using-a-custom-ratelimitingstrategy/index.html
+++ b/content/2022/11/25/optimising-the-throughput-of-async-sinks-using-a-custom-ratelimitingstrategy/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2022/12/14/apache-flink-kubernetes-operator-1.3.0-release-announcement/index.html b/content/2022/12/14/apache-flink-kubernetes-operator-1.3.0-release-announcement/index.html
index 9248bf0..a35823e 100644
--- a/content/2022/12/14/apache-flink-kubernetes-operator-1.3.0-release-announcement/index.html
+++ b/content/2022/12/14/apache-flink-kubernetes-operator-1.3.0-release-announcement/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/01/10/apache-flink-kubernetes-operator-1.3.1-release-announcement/index.html b/content/2023/01/10/apache-flink-kubernetes-operator-1.3.1-release-announcement/index.html
index 456437b..e0fde4f 100644
--- a/content/2023/01/10/apache-flink-kubernetes-operator-1.3.1-release-announcement/index.html
+++ b/content/2023/01/10/apache-flink-kubernetes-operator-1.3.1-release-announcement/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/01/13/apache-flink-table-store-0.3.0-release-announcement/index.html b/content/2023/01/13/apache-flink-table-store-0.3.0-release-announcement/index.html
index 5a95f3a..e75dbcf 100644
--- a/content/2023/01/13/apache-flink-table-store-0.3.0-release-announcement/index.html
+++ b/content/2023/01/13/apache-flink-table-store-0.3.0-release-announcement/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/01/20/delegation-token-framework-obtain-distribute-and-use-temporary-credentials-automatically/index.html b/content/2023/01/20/delegation-token-framework-obtain-distribute-and-use-temporary-credentials-automatically/index.html
index 67efe20..98e924e 100644
--- a/content/2023/01/20/delegation-token-framework-obtain-distribute-and-use-temporary-credentials-automatically/index.html
+++ b/content/2023/01/20/delegation-token-framework-obtain-distribute-and-use-temporary-credentials-automatically/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/01/30/apache-flink-1.16.1-release-announcement/index.html b/content/2023/01/30/apache-flink-1.16.1-release-announcement/index.html
index bb7c3b3..c21ac61 100644
--- a/content/2023/01/30/apache-flink-1.16.1-release-announcement/index.html
+++ b/content/2023/01/30/apache-flink-1.16.1-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/02/27/apache-flink-kubernetes-operator-1.4.0-release-announcement/index.html b/content/2023/02/27/apache-flink-kubernetes-operator-1.4.0-release-announcement/index.html
index 9d2c525..a0bc5b0 100644
--- a/content/2023/02/27/apache-flink-kubernetes-operator-1.4.0-release-announcement/index.html
+++ b/content/2023/02/27/apache-flink-kubernetes-operator-1.4.0-release-announcement/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/03/15/apache-flink-1.15.4-release-announcement/index.html b/content/2023/03/15/apache-flink-1.15.4-release-announcement/index.html
index f89aea6..034abcc 100644
--- a/content/2023/03/15/apache-flink-1.15.4-release-announcement/index.html
+++ b/content/2023/03/15/apache-flink-1.15.4-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/03/23/announcing-the-release-of-apache-flink-1.17/index.html b/content/2023/03/23/announcing-the-release-of-apache-flink-1.17/index.html
index b0f3b83..452594a 100644
--- a/content/2023/03/23/announcing-the-release-of-apache-flink-1.17/index.html
+++ b/content/2023/03/23/announcing-the-release-of-apache-flink-1.17/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/04/19/apache-flink-ml-2.2.0-release-announcement/index.html b/content/2023/04/19/apache-flink-ml-2.2.0-release-announcement/index.html
index 23175a6..471b908 100644
--- a/content/2023/04/19/apache-flink-ml-2.2.0-release-announcement/index.html
+++ b/content/2023/04/19/apache-flink-ml-2.2.0-release-announcement/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/05/03/howto-create-a-batch-source-with-the-new-source-framework/index.html b/content/2023/05/03/howto-create-a-batch-source-with-the-new-source-framework/index.html
index 0d81d46..87ed788 100644
--- a/content/2023/05/03/howto-create-a-batch-source-with-the-new-source-framework/index.html
+++ b/content/2023/05/03/howto-create-a-batch-source-with-the-new-source-framework/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/05/09/howto-migrate-a-real-life-batch-pipeline-from-the-dataset-api-to-the-datastream-api/index.html b/content/2023/05/09/howto-migrate-a-real-life-batch-pipeline-from-the-dataset-api-to-the-datastream-api/index.html
index 01588ef..d4c592c 100644
--- a/content/2023/05/09/howto-migrate-a-real-life-batch-pipeline-from-the-dataset-api-to-the-datastream-api/index.html
+++ b/content/2023/05/09/howto-migrate-a-real-life-batch-pipeline-from-the-dataset-api-to-the-datastream-api/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/05/12/howto-test-a-batch-source-with-the-new-source-framework/index.html b/content/2023/05/12/howto-test-a-batch-source-with-the-new-source-framework/index.html
index 597d02b..35b595b 100644
--- a/content/2023/05/12/howto-test-a-batch-source-with-the-new-source-framework/index.html
+++ b/content/2023/05/12/howto-test-a-batch-source-with-the-new-source-framework/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/05/17/apache-flink-kubernetes-operator-1.5.0-release-announcement/index.html b/content/2023/05/17/apache-flink-kubernetes-operator-1.5.0-release-announcement/index.html
index b891131..8b39864 100644
--- a/content/2023/05/17/apache-flink-kubernetes-operator-1.5.0-release-announcement/index.html
+++ b/content/2023/05/17/apache-flink-kubernetes-operator-1.5.0-release-announcement/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/05/25/apache-flink-1.16.2-release-announcement/index.html b/content/2023/05/25/apache-flink-1.16.2-release-announcement/index.html
index 0f7b358..2d8a770 100644
--- a/content/2023/05/25/apache-flink-1.16.2-release-announcement/index.html
+++ b/content/2023/05/25/apache-flink-1.16.2-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/05/25/apache-flink-1.17.1-release-announcement/index.html b/content/2023/05/25/apache-flink-1.17.1-release-announcement/index.html
index 14b4b53..bfeb3bf 100644
--- a/content/2023/05/25/apache-flink-1.17.1-release-announcement/index.html
+++ b/content/2023/05/25/apache-flink-1.17.1-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/07/03/sigmod-systems-award-for-apache-flink/index.html b/content/2023/07/03/sigmod-systems-award-for-apache-flink/index.html
index 19051b8..2db1834 100644
--- a/content/2023/07/03/sigmod-systems-award-for-apache-flink/index.html
+++ b/content/2023/07/03/sigmod-systems-award-for-apache-flink/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/08/04/announcing-three-new-apache-flink-connectors-the-new-connector-versioning-strategy-and-externalization/index.html b/content/2023/08/04/announcing-three-new-apache-flink-connectors-the-new-connector-versioning-strategy-and-externalization/index.html
index c3e48d1..7a2ddd5 100644
--- a/content/2023/08/04/announcing-three-new-apache-flink-connectors-the-new-connector-versioning-strategy-and-externalization/index.html
+++ b/content/2023/08/04/announcing-three-new-apache-flink-connectors-the-new-connector-versioning-strategy-and-externalization/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/08/15/apache-flink-kubernetes-operator-1.6.0-release-announcement/index.html b/content/2023/08/15/apache-flink-kubernetes-operator-1.6.0-release-announcement/index.html
index 4c804a6..9c7afda 100644
--- a/content/2023/08/15/apache-flink-kubernetes-operator-1.6.0-release-announcement/index.html
+++ b/content/2023/08/15/apache-flink-kubernetes-operator-1.6.0-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/09/19/stateful-functions-3.3.0-release-announcement/index.html b/content/2023/09/19/stateful-functions-3.3.0-release-announcement/index.html
index eddff45..8b15fcf 100644
--- a/content/2023/09/19/stateful-functions-3.3.0-release-announcement/index.html
+++ b/content/2023/09/19/stateful-functions-3.3.0-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/10/24/announcing-the-release-of-apache-flink-1.18/index.html b/content/2023/10/24/announcing-the-release-of-apache-flink-1.18/index.html
index 8c27705..2931105 100644
--- a/content/2023/10/24/announcing-the-release-of-apache-flink-1.18/index.html
+++ b/content/2023/10/24/announcing-the-release-of-apache-flink-1.18/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/10/27/apache-flink-kubernetes-operator-1.6.1-release-announcement/index.html b/content/2023/10/27/apache-flink-kubernetes-operator-1.6.1-release-announcement/index.html
index 9a23519..5b331bd 100644
--- a/content/2023/10/27/apache-flink-kubernetes-operator-1.6.1-release-announcement/index.html
+++ b/content/2023/10/27/apache-flink-kubernetes-operator-1.6.1-release-announcement/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/11/22/apache-flink-kubernetes-operator-1.7.0-release-announcement/index.html b/content/2023/11/22/apache-flink-kubernetes-operator-1.7.0-release-announcement/index.html
index cd10a2b..9b3b843 100644
--- a/content/2023/11/22/apache-flink-kubernetes-operator-1.7.0-release-announcement/index.html
+++ b/content/2023/11/22/apache-flink-kubernetes-operator-1.7.0-release-announcement/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/11/29/apache-flink-1.16.3-release-announcement/index.html b/content/2023/11/29/apache-flink-1.16.3-release-announcement/index.html
index 83b8139..423169b 100644
--- a/content/2023/11/29/apache-flink-1.16.3-release-announcement/index.html
+++ b/content/2023/11/29/apache-flink-1.16.3-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2023/11/29/apache-flink-1.17.2-release-announcement/index.html b/content/2023/11/29/apache-flink-1.17.2-release-announcement/index.html
index 87d72d2..b91be1d 100644
--- a/content/2023/11/29/apache-flink-1.17.2-release-announcement/index.html
+++ b/content/2023/11/29/apache-flink-1.17.2-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2024/01/19/apache-flink-1.18.1-release-announcement/index.html b/content/2024/01/19/apache-flink-1.18.1-release-announcement/index.html
index b333853..8fcc9cd 100644
--- a/content/2024/01/19/apache-flink-1.18.1-release-announcement/index.html
+++ b/content/2024/01/19/apache-flink-1.18.1-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2024/03/18/announcing-the-release-of-apache-flink-1.19/index.html b/content/2024/03/18/announcing-the-release-of-apache-flink-1.19/index.html
index 17bf489..d3a0eae 100644
--- a/content/2024/03/18/announcing-the-release-of-apache-flink-1.19/index.html
+++ b/content/2024/03/18/announcing-the-release-of-apache-flink-1.19/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2024/03/21/apache-flink-kubernetes-operator-1.8.0-release-announcement/index.html b/content/2024/03/21/apache-flink-kubernetes-operator-1.8.0-release-announcement/index.html
index 1db1d24..e1a9d2a 100644
--- a/content/2024/03/21/apache-flink-kubernetes-operator-1.8.0-release-announcement/index.html
+++ b/content/2024/03/21/apache-flink-kubernetes-operator-1.8.0-release-announcement/index.html
@@ -32,7 +32,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2024/05/17/apache-flink-cdc-3.1.0-release-announcement/index.html b/content/2024/05/17/apache-flink-cdc-3.1.0-release-announcement/index.html
index c0aeecc..ff1b6dd 100644
--- a/content/2024/05/17/apache-flink-cdc-3.1.0-release-announcement/index.html
+++ b/content/2024/05/17/apache-flink-cdc-3.1.0-release-announcement/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2024/06/14/apache-flink-1.19.1-release-announcement/index.html b/content/2024/06/14/apache-flink-1.19.1-release-announcement/index.html
index a0348e2..a817e98 100644
--- a/content/2024/06/14/apache-flink-1.19.1-release-announcement/index.html
+++ b/content/2024/06/14/apache-flink-1.19.1-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2024/06/18/apache-flink-cdc-3.1.1-release-announcement/index.html b/content/2024/06/18/apache-flink-cdc-3.1.1-release-announcement/index.html
index bc1d193..c9dfd73 100644
--- a/content/2024/06/18/apache-flink-cdc-3.1.1-release-announcement/index.html
+++ b/content/2024/06/18/apache-flink-cdc-3.1.1-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2024/07/02/apache-flink-kubernetes-operator-1.9.0-release-announcement/index.html b/content/2024/07/02/apache-flink-kubernetes-operator-1.9.0-release-announcement/index.html
index 2841770..c45c126 100644
--- a/content/2024/07/02/apache-flink-kubernetes-operator-1.9.0-release-announcement/index.html
+++ b/content/2024/07/02/apache-flink-kubernetes-operator-1.9.0-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2024/08/02/announcing-the-release-of-apache-flink-1.20/index.html b/content/2024/08/02/announcing-the-release-of-apache-flink-1.20/index.html
index 55deda8..5a03a4b 100644
--- a/content/2024/08/02/announcing-the-release-of-apache-flink-1.20/index.html
+++ b/content/2024/08/02/announcing-the-release-of-apache-flink-1.20/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2024/09/05/apache-flink-cdc-3.2.0-release-announcement/index.html b/content/2024/09/05/apache-flink-cdc-3.2.0-release-announcement/index.html
index fef5a80..1ad2540 100644
--- a/content/2024/09/05/apache-flink-cdc-3.2.0-release-announcement/index.html
+++ b/content/2024/09/05/apache-flink-cdc-3.2.0-release-announcement/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2024/10/23/preview-release-of-apache-flink-2.0/index.html b/content/2024/10/23/preview-release-of-apache-flink-2.0/index.html
index 14fd3c4..2a7a018 100644
--- a/content/2024/10/23/preview-release-of-apache-flink-2.0/index.html
+++ b/content/2024/10/23/preview-release-of-apache-flink-2.0/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2024/10/25/apache-flink-kubernetes-operator-1.10.0-release-announcement/index.html b/content/2024/10/25/apache-flink-kubernetes-operator-1.10.0-release-announcement/index.html
index 27af15e..f5ec0a7 100644
--- a/content/2024/10/25/apache-flink-kubernetes-operator-1.10.0-release-announcement/index.html
+++ b/content/2024/10/25/apache-flink-kubernetes-operator-1.10.0-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2024/11/25/introducing-the-new-amazon-kinesis-data-stream-and-amazon-dynamodb-stream-sources/index.html b/content/2024/11/25/introducing-the-new-amazon-kinesis-data-stream-and-amazon-dynamodb-stream-sources/index.html
index 68f5ca2..663fe10 100644
--- a/content/2024/11/25/introducing-the-new-amazon-kinesis-data-stream-and-amazon-dynamodb-stream-sources/index.html
+++ b/content/2024/11/25/introducing-the-new-amazon-kinesis-data-stream-and-amazon-dynamodb-stream-sources/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2024/11/27/apache-flink-cdc-3.2.1-release-announcement/index.html b/content/2024/11/27/apache-flink-cdc-3.2.1-release-announcement/index.html
index 95652ca..282707f 100644
--- a/content/2024/11/27/apache-flink-cdc-3.2.1-release-announcement/index.html
+++ b/content/2024/11/27/apache-flink-cdc-3.2.1-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2024/12/05/introducing-the-new-prometheus-connector/index.html b/content/2024/12/05/introducing-the-new-prometheus-connector/index.html
index 5f6cff3..8cc0531 100644
--- a/content/2024/12/05/introducing-the-new-prometheus-connector/index.html
+++ b/content/2024/12/05/introducing-the-new-prometheus-connector/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2025/01/21/apache-flink-cdc-3.3.0-release-announcement/index.html b/content/2025/01/21/apache-flink-cdc-3.3.0-release-announcement/index.html
index f9c7d6d..1c3ce23 100644
--- a/content/2025/01/21/apache-flink-cdc-3.3.0-release-announcement/index.html
+++ b/content/2025/01/21/apache-flink-cdc-3.3.0-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2025/02/12/apache-flink-1.19.2-release-announcement/index.html b/content/2025/02/12/apache-flink-1.19.2-release-announcement/index.html
index 86f2b3a..f026871 100644
--- a/content/2025/02/12/apache-flink-1.19.2-release-announcement/index.html
+++ b/content/2025/02/12/apache-flink-1.19.2-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2025/02/12/apache-flink-1.20.1-release-announcement/index.html b/content/2025/02/12/apache-flink-1.20.1-release-announcement/index.html
index 7fe1acd..b2d8028 100644
--- a/content/2025/02/12/apache-flink-1.20.1-release-announcement/index.html
+++ b/content/2025/02/12/apache-flink-1.20.1-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2025/03/03/apache-flink-kubernetes-operator-1.11.0-release-announcement/index.html b/content/2025/03/03/apache-flink-kubernetes-operator-1.11.0-release-announcement/index.html
index 7bd8c06..e612590 100644
--- a/content/2025/03/03/apache-flink-kubernetes-operator-1.11.0-release-announcement/index.html
+++ b/content/2025/03/03/apache-flink-kubernetes-operator-1.11.0-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2025/03/24/apache-flink-2.0.0-a-new-era-of-real-time-data-processing/index.html b/content/2025/03/24/apache-flink-2.0.0-a-new-era-of-real-time-data-processing/index.html
index 3af74ff..4cfce5b 100644
--- a/content/2025/03/24/apache-flink-2.0.0-a-new-era-of-real-time-data-processing/index.html
+++ b/content/2025/03/24/apache-flink-2.0.0-a-new-era-of-real-time-data-processing/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2025/04/30/introducing-the-externalized-kudu-connector/index.html b/content/2025/04/30/introducing-the-externalized-kudu-connector/index.html
index fcda619..5d53b44 100644
--- a/content/2025/04/30/introducing-the-externalized-kudu-connector/index.html
+++ b/content/2025/04/30/introducing-the-externalized-kudu-connector/index.html
@@ -26,7 +26,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -280,7 +280,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2025/05/16/apache-flink-cdc-3.4.0-release-announcement/index.html b/content/2025/05/16/apache-flink-cdc-3.4.0-release-announcement/index.html
index 6e203ac..e6a2581 100644
--- a/content/2025/05/16/apache-flink-cdc-3.4.0-release-announcement/index.html
+++ b/content/2025/05/16/apache-flink-cdc-3.4.0-release-announcement/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2025/06/03/apache-flink-kubernetes-operator-1.12.0-release-announcement/index.html b/content/2025/06/03/apache-flink-kubernetes-operator-1.12.0-release-announcement/index.html
index 57e82b7..08a9202 100644
--- a/content/2025/06/03/apache-flink-kubernetes-operator-1.12.0-release-announcement/index.html
+++ b/content/2025/06/03/apache-flink-kubernetes-operator-1.12.0-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2025/07/10/apache-flink-1.19.3-release-announcement/index.html b/content/2025/07/10/apache-flink-1.19.3-release-announcement/index.html
index a316e98..ffb45dc 100644
--- a/content/2025/07/10/apache-flink-1.19.3-release-announcement/index.html
+++ b/content/2025/07/10/apache-flink-1.19.3-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2025/07/10/apache-flink-1.20.2-release-announcement/index.html b/content/2025/07/10/apache-flink-1.20.2-release-announcement/index.html
index c721b2d..a796c08 100644
--- a/content/2025/07/10/apache-flink-1.20.2-release-announcement/index.html
+++ b/content/2025/07/10/apache-flink-1.20.2-release-announcement/index.html
@@ -30,7 +30,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/index.html b/content/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/index.html
index f80db76..3f2a944 100644
--- a/content/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/index.html
+++ b/content/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/index.html
@@ -28,7 +28,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/index.html b/content/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/index.html
new file mode 100644
index 0000000..2dce030
--- /dev/null
+++ b/content/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/index.html
@@ -0,0 +1,752 @@
+
+<!DOCTYPE html>
+<html lang="en" dir=ZgotmplZ>
+
+<head>
+  
+
+
+<link rel="stylesheet" href="/bootstrap/css/bootstrap.min.css">
+<script src="/bootstrap/js/bootstrap.bundle.min.js"></script>
+<link rel="stylesheet" type="text/css" href="/font-awesome/css/font-awesome.min.css">
+<script src="/js/anchor.min.js"></script>
+<script src="/js/flink.js"></script>
+<link rel="canonical" href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">
+
+  <meta charset="UTF-8">
+<meta name="viewport" content="width=device-width, initial-scale=1.0">
+<meta name="description" content="The Apache Flink Community is excited to announce the release of Flink CDC 3.5.0!
+This release introduces new pipeline connectors for Apache Fluss and PostgreSQL, and improves usability in multi-tables (with frequent table structure changes) synchronization scenario, many issues encountered in the transform and Schema evolution frameworks have also been fixed.
+Flink CDC release packages are available at Releases Page, and documentations are available at Flink CDC documentation page. Looking forward to any feedback from the community through the Flink mailing lists or JIRA!">
+<meta name="theme-color" content="#FFFFFF"><meta property="og:title" content="Apache Flink CDC 3.5.0 Release Announcement" />
+<meta property="og:description" content="The Apache Flink Community is excited to announce the release of Flink CDC 3.5.0!
+This release introduces new pipeline connectors for Apache Fluss and PostgreSQL, and improves usability in multi-tables (with frequent table structure changes) synchronization scenario, many issues encountered in the transform and Schema evolution frameworks have also been fixed.
+Flink CDC release packages are available at Releases Page, and documentations are available at Flink CDC documentation page. Looking forward to any feedback from the community through the Flink mailing lists or JIRA!" />
+<meta property="og:type" content="article" />
+<meta property="og:url" content="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/" /><meta property="article:section" content="posts" />
+<meta property="article:published_time" content="2025-09-26T08:00:00+00:00" />
+<meta property="article:modified_time" content="2025-09-26T08:00:00+00:00" />
+<title>Apache Flink CDC 3.5.0 Release Announcement | Apache Flink</title>
+<link rel="manifest" href="/manifest.json">
+<link rel="icon" href="/favicon.png" type="image/x-icon">
+<link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
+<!--
+Made with Book Theme
+https://github.com/alex-shpak/hugo-book
+-->
+
+  <meta name="generator" content="Hugo 0.124.1">
+
+    
+    <script>
+      var _paq = window._paq = window._paq || [];
+       
+       
+      _paq.push(['disableCookies']);
+       
+      _paq.push(["setDomains", ["*.flink.apache.org","*.nightlies.apache.org/flink"]]);
+      _paq.push(['trackPageView']);
+      _paq.push(['enableLinkTracking']);
+      (function() {
+        var u="//analytics.apache.org/";
+        _paq.push(['setTrackerUrl', u+'matomo.php']);
+        _paq.push(['setSiteId', '1']);
+        var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
+        g.async=true; g.src=u+'matomo.js'; s.parentNode.insertBefore(g,s);
+      })();
+    </script>
+    
+</head>
+
+<body dir=ZgotmplZ>
+  
+
+
+<header>
+  <nav class="navbar navbar-expand-xl">
+    <div class="container-fluid">
+      <a class="navbar-brand" href="/">
+        <img src="/img/logo/png/100/flink_squirrel_100_color.png" alt="Apache Flink" height="47" width="47" class="d-inline-block align-text-middle">
+        <span>Apache Flink</span>
+      </a>
+      <button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarSupportedContent" aria-controls="navbarSupportedContent" aria-expanded="false" aria-label="Toggle navigation">
+          <i class="fa fa-bars navbar-toggler-icon"></i>
+      </button>
+      <div class="collapse navbar-collapse" id="navbarSupportedContent">
+        <ul class="navbar-nav">
+          
+
+
+
+
+
+    
+      
+  
+    <li class="nav-item dropdown">
+      <a class="nav-link dropdown-toggle" href="#" role="button" data-bs-toggle="dropdown" aria-expanded="false">About</a>
+      <ul class="dropdown-menu">
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="/what-is-flink/flink-architecture/">Architecture</a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="/what-is-flink/flink-applications/">Applications</a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="/what-is-flink/flink-operations/">Operations</a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="/what-is-flink/use-cases/">Use Cases</a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="/what-is-flink/powered-by/">Powered By</a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="/what-is-flink/roadmap/">Roadmap</a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="/what-is-flink/community/">Community & Project Info</a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="/what-is-flink/security/">Security</a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="/what-is-flink/special-thanks/">Special Thanks</a>
+  
+
+          </li>
+        
+      </ul>
+    </li>
+  
+
+    
+      
+  
+    <li class="nav-item dropdown">
+      <a class="nav-link dropdown-toggle" href="#" role="button" data-bs-toggle="dropdown" aria-expanded="false">Getting Started</a>
+      <ul class="dropdown-menu">
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-docs-stable/docs/try-flink/local_installation/">With Flink<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    </a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-kubernetes-operator-docs-stable/docs/try-flink-kubernetes-operator/quick-start/">With Flink Kubernetes Operator<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    </a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable/docs/get-started/introduction/">With Flink CDC<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    </a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-ml-docs-stable/docs/try-flink-ml/quick-start/">With Flink ML<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    </a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-statefun-docs-stable/getting-started/project-setup.html">With Flink Stateful Functions<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    </a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-docs-stable/docs/learn-flink/overview/">Training Course<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    </a>
+  
+
+          </li>
+        
+      </ul>
+    </li>
+  
+
+    
+      
+  
+    <li class="nav-item dropdown">
+      <a class="nav-link dropdown-toggle" href="#" role="button" data-bs-toggle="dropdown" aria-expanded="false">Documentation</a>
+      <ul class="dropdown-menu">
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-docs-stable/">Flink 2.1 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    </a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-docs-lts/">Flink 1.20 (LTS)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    </a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-docs-master/">Flink Master (snapshot)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    </a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-kubernetes-operator-docs-stable/">Kubernetes Operator 1.12 (latest)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    </a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-kubernetes-operator-docs-main">Kubernetes Operator Main (snapshot)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    </a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    </a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-master">CDC Master (snapshot)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    </a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-ml-docs-stable/">ML 2.3 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    </a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-ml-docs-master">ML Master (snapshot)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    </a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-statefun-docs-stable/">Stateful Functions 3.3 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    </a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-statefun-docs-master">Stateful Functions Master (snapshot)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    </a>
+  
+
+          </li>
+        
+      </ul>
+    </li>
+  
+
+    
+      
+  
+    <li class="nav-item dropdown">
+      <a class="nav-link dropdown-toggle" href="#" role="button" data-bs-toggle="dropdown" aria-expanded="false">How to Contribute</a>
+      <ul class="dropdown-menu">
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="/how-to-contribute/overview/">Overview</a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="/how-to-contribute/contribute-code/">Contribute Code</a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="/how-to-contribute/reviewing-prs/">Review Pull Requests</a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="/how-to-contribute/code-style-and-quality-preamble/">Code Style and Quality Guide</a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="/how-to-contribute/contribute-documentation/">Contribute Documentation</a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="/how-to-contribute/documentation-style-guide/">Documentation Style Guide</a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="/how-to-contribute/improve-website/">Contribute to the Website</a>
+  
+
+          </li>
+        
+          <li>
+            
+  
+    <a class="dropdown-item" href="/how-to-contribute/getting-help/">Getting Help</a>
+  
+
+          </li>
+        
+      </ul>
+    </li>
+  
+
+    
+
+
+    
+      
+  
+    <li class="nav-item">
+      
+  
+    <a class="nav-link" href="/posts/">Flink Blog</a>
+  
+
+    </li>
+  
+
+    
+      
+  
+    <li class="nav-item">
+      
+  
+    <a class="nav-link" href="/downloads/">Downloads</a>
+  
+
+    </li>
+  
+
+    
+
+
+    
+
+
+
+
+
+
+
+
+
+        </ul>
+        <div class="book-search">
+          <div class="book-search-spinner hidden">
+            <i class="fa fa-refresh fa-spin"></i>
+          </div>
+          <form class="search-bar d-flex" onsubmit="return false;"su>
+            <input type="text" id="book-search-input" placeholder="Search" aria-label="Search" maxlength="64" data-hotkeys="s/">
+            <i class="fa fa-search search"></i>
+            <i class="fa fa-circle-o-notch fa-spin spinner"></i>
+          </form>
+          <div class="book-search-spinner hidden"></div>
+          <ul id="book-search-results"></ul>
+        </div>
+      </div>
+    </div>
+  </nav>
+  <div class="navbar-clearfix"></div>
+</header>
+ 
+  
+      <main class="flex">
+        <section class="container book-page">
+          
+<article class="markdown">
+    <h1>
+        <a href="/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </h1>
+    
+
+
+  September 26, 2025 -
+
+
+
+  Yanquan Lv
+
+
+
+
+    <p><p>The Apache Flink Community is excited to announce the release of Flink CDC 3.5.0!<br>
+This release introduces new pipeline connectors for <a href="https://fluss.apache.org">Apache Fluss</a> and PostgreSQL, and improves usability in multi-table synchronization scenarios (with frequent table structure changes). Many issues encountered in the transform and schema evolution frameworks have also been fixed.</p>
+<p>Flink CDC release packages are available at the <a href="https://flink.apache.org/downloads.html#flink-cdc">Releases Page</a>,
+and documentation is available at the <a href="https://nightlies.apache.org/flink/flink-cdc-docs-release-3.5">Flink CDC documentation</a> page.
+Looking forward to any feedback from the community through the Flink <a href="https://flink.apache.org/community.html#mailing-lists">mailing lists</a> or <a href="https://issues.apache.org/jira/browse/flink">JIRA</a>!</p>
+<h1 id="highlights">
+  Highlights
+  <a class="anchor" href="#highlights">#</a>
+</h1>
+<h2 id="pipeline-core">
+  Pipeline Core
+  <a class="anchor" href="#pipeline-core">#</a>
+</h2>
+<h2 id="schema-evolution-optimization">
+  Schema Evolution Optimization
+  <a class="anchor" href="#schema-evolution-optimization">#</a>
+</h2>
+<ul>
+<li>[FLINK-38045] During job failover, reissue the schema information stored in the state of Source to enhance the correctness of handling schema changes and make transform operator stateless.</li>
+<li>[FLINK-38243][FLINK-38244] Properly handle schema evolution for case-sensitive table and column names.</li>
+</ul>
+<h2 id="transform-enhancement">
+  Transform Enhancement
+  <a class="anchor" href="#transform-enhancement">#</a>
+</h2>
+<ul>
+<li>[FLINK-38079] Enhance precision support for DATE and TIME types to improve temporal handling in built-in and user-defined functions.</li>
+</ul>
+<h2 id="incremental-source-framework">
+  Incremental Source Framework
+  <a class="anchor" href="#incremental-source-framework">#</a>
+</h2>
+<ul>
+<li>[FLINK-38265] Properly design the termination logic for stream split to prevent exceptions causing the job to get stuck.</li>
+</ul>
+<h2 id="pipeline-connectors">
+  Pipeline Connectors
+  <a class="anchor" href="#pipeline-connectors">#</a>
+</h2>
+<h3 id="apache-fluss-newly-added">
+  Apache Fluss (newly added)
+  <a class="anchor" href="#apache-fluss-newly-added">#</a>
+</h3>
+<ul>
+<li>[FLINK-37958] Apache Fluss (Incubating) is a streaming storage built for real-time analytics which can serve as the real-time data layer for Lakehouse architectures. In this version, Fluss is supported to be the sink for Pipeline jobs.</li>
+</ul>
+<h3 id="postgresql-newly-added">
+  PostgreSQL (newly added)
+  <a class="anchor" href="#postgresql-newly-added">#</a>
+</h3>
+<ul>
+<li>[FLINK-35670] PostgreSQL is a powerful, open source object-relational database system with over 35 years of active development that has earned it a strong reputation for reliability, feature robustness, and performance. In this version, PostgreSQL is supported to be the source for Pipeline jobs.</li>
+</ul>
+<h3 id="apache-paimon">
+  Apache Paimon
+  <a class="anchor" href="#apache-paimon">#</a>
+</h3>
+<ul>
+<li>[FLINK-38142] Bump Paimon version to 1.2.0.</li>
+<li>[FLINK-38206] Support writing to an existing table whose schema is inconsistent with the upstream schema.</li>
+<li>[FLINK-37824] Support column comments when creating a new table.</li>
+</ul>
+<h2 id="source-connectors">
+  Source Connectors
+  <a class="anchor" href="#source-connectors">#</a>
+</h2>
+<h3 id="mysql-cdc">
+  MySQL CDC
+  <a class="anchor" href="#mysql-cdc">#</a>
+</h3>
+<ul>
+<li>[FLINK-37065] Fix potential data loss caused by GTID out-of-order scenarios.</li>
+<li>[FLINK-38238] Add support for processing table with varchar(0) column.</li>
+</ul>
+<h3 id="postgresql-cdc">
+  PostgreSQL CDC
+  <a class="anchor" href="#postgresql-cdc">#</a>
+</h3>
+<ul>
+<li>[FLINK-37479] Support discovering partitioned tables.</li>
+<li>[FLINK-37738] Support reading changelog as append only mode.</li>
+</ul>
+<h3 id="oceanbase-cdc">
+  OceanBase CDC
+  <a class="anchor" href="#oceanbase-cdc">#</a>
+</h3>
+<ul>
+<li>[FLINK-38111] Migrate OceanBase CDC Connector from LogProxy to OceanBase Binlog Service.</li>
+</ul>
+<h1 id="list-of-contributors">
+  List of Contributors
+  <a class="anchor" href="#list-of-contributors">#</a>
+</h1>
+<p>We would like to express gratitude to all the contributors working on this release:</p>
+<p>Sachin Mittal, suhwan, Lanny Boarts, hql0312, yuanoOo, gongzhongqiang, Kunni, North Lin, suntectec, SeungMin, ChengJie1053, kangzai, Ihor Mielientiev, Vinh Pham, wudi, Marta Paes, Shawn Huang, zhangchao.doovvv, linjc13, Sergei Morozov, MOBIN, Junbo Wang, junmuz, lvyanquan, Thorne, zhuxt2015, Xin Gong, linjianchang, tbpure, Tianzhu Wen, yuxiqian.yxq, Naci Simsek, Мухутдинов Артур, Hongshun Wang, proletarians, wangjunbo, Chao Zhang, ouyangwulin, Hang Ruan, Junbo wang, yuxiqian, wuzexian</p>
+</p>
+</article>
+
+          
+
+
+
+  
+    
+    <div class="edit-this-page">
+      <p>
+        <a href="https://cwiki.apache.org/confluence/display/FLINK/Flink+Translation+Specifications">Want to contribute translation?</a>
+      </p>
+      <p>
+        <a href="//github.com/apache/flink-web/edit/asf-site/docs/content/posts/2025-09-26-release-cdc-3.5.0.md">
+          Edit This Page<i class="fa fa-edit fa-fw"></i> 
+        </a>
+      </p>
+    </div>
+
+        </section>
+        
+          <aside class="book-toc">
+            
+
+
+<nav id="TableOfContents"><h3>On This Page <a href="javascript:void(0)" class="toc" onclick="collapseToc()"><i class="fa fa-times" aria-hidden="true"></i></a></h3>
+  <ul>
+    <li><a href="#highlights">Highlights</a>
+      <ul>
+        <li><a href="#pipeline-core">Pipeline Core</a></li>
+        <li><a href="#schema-evolution-optimization">Schema Evolution Optimization</a></li>
+        <li><a href="#transform-enhancement">Transform Enhancement</a></li>
+        <li><a href="#incremental-source-framework">Incremental Source Framework</a></li>
+        <li><a href="#pipeline-connectors">Pipeline Connectors</a>
+          <ul>
+            <li><a href="#apache-fluss-newly-added">Apache Fluss (newly added)</a></li>
+            <li><a href="#postgresql-newly-added">PostgreSQL (newly added)</a></li>
+            <li><a href="#apache-paimon">Apache Paimon</a></li>
+          </ul>
+        </li>
+        <li><a href="#source-connectors">Source Connectors</a>
+          <ul>
+            <li><a href="#mysql-cdc">MySQL CDC</a></li>
+            <li><a href="#postgresql-cdc">PostgreSQL CDC</a></li>
+            <li><a href="#oceanbase-cdc">OceanBase CDC</a></li>
+          </ul>
+        </li>
+      </ul>
+    </li>
+    <li><a href="#list-of-contributors">List of Contributors</a></li>
+  </ul>
+</nav>
+
+
+          </aside>
+          <aside class="expand-toc hidden">
+            <a class="toc" onclick="expandToc()" href="javascript:void(0)">
+              <i class="fa fa-bars" aria-hidden="true"></i>
+            </a>
+          </aside>
+        
+      </main>
+
+      <footer>
+        
+
+
+<div class="separator"></div>
+<div class="panels">
+  <div class="wrapper">
+      <div class="panel">
+        <ul>
+          <li>
+            <a href="https://flink-packages.org/">flink-packages.org</a>
+          </li>
+          <li>
+            <a href="https://www.apache.org/">Apache Software Foundation</a>
+          </li>
+          <li>
+            <a href="https://www.apache.org/licenses/">License</a>
+          </li>
+          
+          
+          
+            
+          
+            
+          
+          
+
+          
+            
+              
+            
+          
+            
+              
+                <li>
+                  <a  href="/zh/">
+                    <i class="fa fa-globe" aria-hidden="true"></i>&nbsp;中文版
+                  </a>
+                </li>
+              
+            
+          
+       </ul>
+      </div>
+      <div class="panel">
+        <ul>
+          <li>
+            <a href="/what-is-flink/security">Security</a-->
+          </li>
+          <li>
+            <a href="https://www.apache.org/foundation/sponsorship.html">Donate</a>
+          </li>
+          <li>
+            <a href="https://www.apache.org/foundation/thanks.html">Thanks</a>
+          </li>
+       </ul>
+      </div>
+      <div class="panel icons">
+        <div>
+          <a href="/posts">
+            <div class="icon flink-blog-icon"></div>
+            <span>Flink blog</span>
+          </a>
+        </div>
+        <div>
+          <a href="https://github.com/apache/flink">
+            <div class="icon flink-github-icon"></div>
+            <span>Github</span>
+          </a>
+        </div>
+        <div>
+          <a href="https://twitter.com/apacheflink">
+            <div class="icon flink-twitter-icon"></div>
+            <span>Twitter</span>
+          </a>
+        </div>
+      </div>
+  </div>
+</div>
+
+<hr/>
+
+<div class="container disclaimer">
+  <p>The contents of this website are © 2024 Apache Software Foundation under the terms of the Apache License v2. Apache Flink, Flink, and the Flink logo are either registered trademarks or trademarks of The Apache Software Foundation in the United States and other countries.</p>
+</div>
+
+
+
+      </footer>
+    
+  </body>
+</html>
+
+
+
+
+
+
diff --git a/content/404.html b/content/404.html
index 0fd7d4f..955e573 100644
--- a/content/404.html
+++ b/content/404.html
@@ -15,7 +15,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/404.html" title="404 Page not found">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
diff --git a/content/categories/index.html b/content/categories/index.html
index 61e0296..de32555 100644
--- a/content/categories/index.html
+++ b/content/categories/index.html
@@ -26,7 +26,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/categories/" title="Categories">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/categories/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -281,7 +281,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/documentation/flink-cdc-master/index.html b/content/documentation/flink-cdc-master/index.html
index dfe1898..8460d6f 100644
--- a/content/documentation/flink-cdc-master/index.html
+++ b/content/documentation/flink-cdc-master/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/documentation/flink-cdc-master/" title="CDC Master (snapshot)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/documentation/flink-cdc-stable/index.html b/content/documentation/flink-cdc-stable/index.html
index a5e26c9..cfb40d6 100644
--- a/content/documentation/flink-cdc-stable/index.html
+++ b/content/documentation/flink-cdc-stable/index.html
@@ -22,13 +22,13 @@
 <meta property="og:url" content="https://flink.apache.org/documentation/flink-cdc-stable/" /><meta property="article:section" content="documentation" />
 
 
-<title>CDC 3.4 (stable) | Apache Flink</title>
+<title>CDC 3.5 (stable) | Apache Flink</title>
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
-<link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/documentation/flink-cdc-stable/" title="CDC 3.4 (stable)">
+<link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/documentation/flink-cdc-stable/" title="CDC 3.5 (stable)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/documentation/flink-kubernetes-operator-master/index.html b/content/documentation/flink-kubernetes-operator-master/index.html
index 9c803e4..03d496b 100644
--- a/content/documentation/flink-kubernetes-operator-master/index.html
+++ b/content/documentation/flink-kubernetes-operator-master/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/documentation/flink-kubernetes-operator-master/" title="Kubernetes Operator Main (snapshot)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/documentation/flink-kubernetes-operator-stable/index.html b/content/documentation/flink-kubernetes-operator-stable/index.html
index 547b58b..c6fd206 100644
--- a/content/documentation/flink-kubernetes-operator-stable/index.html
+++ b/content/documentation/flink-kubernetes-operator-stable/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/documentation/flink-kubernetes-operator-stable/" title="Kubernetes Operator 1.12 (latest)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/documentation/flink-lts/index.html b/content/documentation/flink-lts/index.html
index 26cc369..0f3e08a 100644
--- a/content/documentation/flink-lts/index.html
+++ b/content/documentation/flink-lts/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/documentation/flink-lts/" title="Flink 1.20 (LTS)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/documentation/flink-master/index.html b/content/documentation/flink-master/index.html
index 71a33b7..bcce372 100644
--- a/content/documentation/flink-master/index.html
+++ b/content/documentation/flink-master/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/documentation/flink-master/" title="Flink Master (snapshot)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/documentation/flink-stable/index.html b/content/documentation/flink-stable/index.html
index f2c2c68..95ce1c7 100644
--- a/content/documentation/flink-stable/index.html
+++ b/content/documentation/flink-stable/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/documentation/flink-stable/" title="Flink 2.1 (stable)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/documentation/flink-stateful-functions-master/index.html b/content/documentation/flink-stateful-functions-master/index.html
index 0036aaa..d519b23 100644
--- a/content/documentation/flink-stateful-functions-master/index.html
+++ b/content/documentation/flink-stateful-functions-master/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/documentation/flink-stateful-functions-master/" title="Stateful Functions Master (snapshot)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/documentation/flink-stateful-functions-stable/index.html b/content/documentation/flink-stateful-functions-stable/index.html
index e2b85fb..b392715 100644
--- a/content/documentation/flink-stateful-functions-stable/index.html
+++ b/content/documentation/flink-stateful-functions-stable/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/documentation/flink-stateful-functions-stable/" title="Stateful Functions 3.3 (stable)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/documentation/flinkml-master/index.html b/content/documentation/flinkml-master/index.html
index 92e5d60..b50a923 100644
--- a/content/documentation/flinkml-master/index.html
+++ b/content/documentation/flinkml-master/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/documentation/flinkml-master/" title="ML Master (snapshot)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/documentation/flinkml-stable/index.html b/content/documentation/flinkml-stable/index.html
index b1fc568..0159814 100644
--- a/content/documentation/flinkml-stable/index.html
+++ b/content/documentation/flinkml-stable/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/documentation/flinkml-stable/" title="ML 2.3 (stable)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/documentation/index.html b/content/documentation/index.html
index b77caa0..3e3998e 100644
--- a/content/documentation/index.html
+++ b/content/documentation/index.html
@@ -26,7 +26,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/documentation/" title="Documentation">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/documentation/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -281,7 +281,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/downloads/index.html b/content/downloads/index.html
index b7f7bbc..ca715de 100644
--- a/content/downloads/index.html
+++ b/content/downloads/index.html
@@ -42,7 +42,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/downloads/" title="Downloads">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -296,7 +296,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -812,7 +812,22 @@
   Apache Flink CDC
   <a class="anchor" href="#apache-flink-cdc">#</a>
 </h2>
-<p>Apache Flink® CDC 3.4 is the latest stable release.</p>
+<p>Apache Flink® CDC 3.5 is the latest stable release.</p>
+<h3 id="apache-flink-cdc-350">
+  Apache Flink CDC 3.5.0
+  <a class="anchor" href="#apache-flink-cdc-350">#</a>
+</h3>
+<p><a href="https://www.apache.org/dyn/closer.lua/flink/flink-cdc-3.5.0/flink-cdc-3.5.0-bin.tar.gz">Apache Flink CDC 3.5.0</a> <a href="https://downloads.apache.org/flink/flink-cdc-3.5.0/flink-cdc-3.5.0-bin.tar.gz.asc">(asc</a>, <a href="https://downloads.apache.org/flink/flink-cdc-3.5.0/flink-cdc-3.5.0-bin.tar.gz.sha512">sha512)</a></p>
+<p><a href="https://www.apache.org/dyn/closer.lua/flink/flink-cdc-3.5.0/flink-cdc-3.5.0-src.tgz">Apache Flink CDC 3.5.0 Source Release</a> <a href="https://downloads.apache.org/flink/flink-cdc-3.5.0/flink-cdc-3.5.0-src.tgz.asc">(asc</a>, <a href="https://downloads.apache.org/flink/flink-cdc-3.5.0/flink-cdc-3.5.0-src.tgz.sha512">sha512)</a></p>
+<p>This component is compatible with Apache Flink version(s):</p>
+<ul>
+<li>
+<p>1.19.x</p>
+</li>
+<li>
+<p>1.20.x</p>
+</li>
+</ul>
 <h3 id="apache-flink-cdc-340">
   Apache Flink CDC 3.4.0
   <a class="anchor" href="#apache-flink-cdc-340">#</a>
@@ -1293,6 +1308,7 @@
   <a class="anchor" href="#apache-flink-cdc-1">#</a>
 </h3>
 <ul>
+<li>Apache Flink CDC 3.5.0 - 2025-09-26 (<a href="https://archive.apache.org/dist/flink/flink-cdc-3.5.0/flink-cdc-3.5.0-src.tgz">Source</a>, <a href="https://archive.apache.org/dist/flink/flink-cdc-3.5.0/flink-cdc-3.5.0-bin.tar.gz">Binaries</a>)</li>
 <li>Apache Flink CDC 3.4.0 - 2025-05-16 (<a href="https://archive.apache.org/dist/flink/flink-cdc-3.4.0/flink-cdc-3.4.0-src.tgz">Source</a>, <a href="https://archive.apache.org/dist/flink/flink-cdc-3.4.0/flink-cdc-3.4.0-bin.tar.gz">Binaries</a>)</li>
 <li>Apache Flink CDC 3.3.0 - 2025-01-21 (<a href="https://archive.apache.org/dist/flink/flink-cdc-3.3.0/flink-cdc-3.3.0-src.tgz">Source</a>, <a href="https://archive.apache.org/dist/flink/flink-cdc-3.3.0/flink-cdc-3.3.0-bin.tar.gz">Binaries</a>)</li>
 <li>Apache Flink CDC 3.2.1 - 2024-11-27 (<a href="https://archive.apache.org/dist/flink/flink-cdc-3.2.1/flink-cdc-3.2.1-src.tgz">Source</a>, <a href="https://archive.apache.org/dist/flink/flink-cdc-3.2.1/flink-cdc-3.2.1-bin.tar.gz">Binaries</a>)</li>
@@ -1450,6 +1466,7 @@
         </li>
         <li><a href="#apache-flink-cdc">Apache Flink CDC</a>
           <ul>
+            <li><a href="#apache-flink-cdc-350">Apache Flink CDC 3.5.0</a></li>
             <li><a href="#apache-flink-cdc-340">Apache Flink CDC 3.4.0</a></li>
             <li><a href="#apache-flink-cdc-330">Apache Flink CDC 3.3.0</a></li>
             <li><a href="#apache-flink-cdc-321">Apache Flink CDC 3.2.1</a></li>
diff --git a/content/en.search-data.min.99ba92d527f4c3d92b29f41d78147697fa1d346b3432b5c524cae5356f163ffc.js b/content/en.search-data.min.72f68b6b627b36c5e28a8b961a951bedb080cdb7b03ad224b68e89aa6fa8cb88.js
similarity index 98%
rename from content/en.search-data.min.99ba92d527f4c3d92b29f41d78147697fa1d346b3432b5c524cae5356f163ffc.js
rename to content/en.search-data.min.72f68b6b627b36c5e28a8b961a951bedb080cdb7b03ad224b68e89aa6fa8cb88.js
index 2d8a549..15ccffd 100644
--- a/content/en.search-data.min.99ba92d527f4c3d92b29f41d78147697fa1d346b3432b5c524cae5356f163ffc.js
+++ b/content/en.search-data.min.72f68b6b627b36c5e28a8b961a951bedb080cdb7b03ad224b68e89aa6fa8cb88.js
@@ -139,7 +139,12 @@
 This component is compatible with Apache Flink version(s):
 1.16.x
 1.17.x
-Apache Flink CDC # Apache Flink® CDC 3.4 is the latest stable release.
+Apache Flink CDC # Apache Flink® CDC 3.5 is the latest stable release.
+Apache Flink CDC 3.5.0 # Apache Flink CDC 3.5.0 (asc, sha512)
+Apache Flink CDC 3.5.0 Source Release (asc, sha512)
+This component is compatible with Apache Flink version(s):
+1.19.x
+1.20.x
 Apache Flink CDC 3.4.0 # Apache Flink CDC 3.4.0 (asc, sha512)
 Apache Flink CDC 3.4.0 Source Release (asc, sha512)
 This component is compatible with Apache Flink version(s):
@@ -209,7 +214,7 @@
 As of March 2023, the Flink community decided that upon release of a new Flink minor version, the community will perform one final bugfix release for resolved critical/blocker issues in the Flink minor version losing support. If 1.16.1 is the current release and 1.15.4 is the latest previous patch version, once 1.17.0 is released we will create a 1.15.5 to flush out any resolved critical/blocker issues.
 Note that the community is always open to discussing bugfix releases for even older versions. Please get in touch with the developers for that on the dev@flink.apache.org mailing list.
 All stable releases # All Flink releases are available via https://archive.apache.org/dist/flink/ including checksums and cryptographic signatures. At the time of writing, this includes the following versions:
-Apache Flink # Apache Flink 2.1.0 - 2025-07-31 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 2.0.0 - 2025-03-24 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.20.2 - 2025-07-10 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.20.1 - 2025-02-12 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.20.0 - 2024-08-02 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.19.3 - 2025-07-10 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.19.2 - 2025-02-12 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.19.1 - 2024-06-14 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.19.0 - 2024-03-18 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.18.1 - 2024-01-19 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.18.0 - 2023-10-25 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.17.2 - 2023-11-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.17.1 - 2023-05-25 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.17.0 - 2023-03-23 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.16.3 - 2023-11-20 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.16.2 - 2023-05-25 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.16.1 - 2023-01-30 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.16.0 - 2022-10-28 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.15.4 - 2023-03-15 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.15.3 - 2022-11-10 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.15.2 - 2022-08-24 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.15.1 - 2022-07-06 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.15.0 - 2022-05-05 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.14.6 - 2022-09-28 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.14.5 - 2022-06-22 (Source, Binaries, Docs, 
Javadocs, Scaladocs ) Apache Flink 1.14.4 - 2022-03-02 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.14.3 - 2022-01-17 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.14.2 - 2021-12-16 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.14.0 - 2021-09-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.6 - 2022-02-18 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.5 - 2021-12-16 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.3 - 2021-10-19 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.2 - 2021-08-02 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.1 - 2021-05-28 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.0 - 2021-04-30 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.7 - 2021-12-16 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.5 - 2021-08-06 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.4 - 2021-05-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.3 - 2021-04-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.2 - 2021-03-03 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.1 - 2021-01-19 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.0 - 2020-12-08 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.6 - 2021-12-16 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.4 - 2021-08-09 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.3 - 2020-12-18 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.2 - 2020-09-17 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.1 - 2020-07-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.0 - 2020-07-06 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.10.3 - 2021-01-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.10.2 - 2020-08-25 (Source, Binaries, Docs, 
Javadocs, Scaladocs ) Apache Flink 1.10.1 - 2020-05-12 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.10.0 - 2020-02-11 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.9.3 - 2020-04-24 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.9.2 - 2020-01-30 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.9.1 - 2019-10-18 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.9.0 - 2019-08-22 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.8.3 - 2019-12-11 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.8.2 - 2019-09-11 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.8.1 - 2019-07-02 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.8.0 - 2019-04-09 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.7.2 - 2019-02-15 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.7.1 - 2018-12-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.7.0 - 2018-11-30 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.6.4 - 2019-02-25 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.6.3 - 2018-12-22 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.6.2 - 2018-10-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.6.1 - 2018-09-19 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.6.0 - 2018-08-08 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.6 - 2018-12-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.5 - 2018-10-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.4 - 2018-09-19 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.3 - 2018-08-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.2 - 2018-07-31 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.1 - 2018-07-12 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.0 - 2018-05-25 (Source, Binaries, Docs, Javadocs, Scaladocs ) 
Apache Flink 1.4.2 - 2018-03-08 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.4.1 - 2018-02-15 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.4.0 - 2017-11-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.3.3 - 2018-03-15 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.3.2 - 2017-08-05 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.3.1 - 2017-06-23 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.3.0 - 2017-06-01 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.2.1 - 2017-04-26 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.2.0 - 2017-02-06 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.5 - 2017-03-22 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.4 - 2016-12-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.3 - 2016-10-13 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.2 - 2016-09-05 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.1 - 2016-08-11 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.0 - 2016-08-08 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.0.3 - 2016-05-12 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.0.2 - 2016-04-23 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.0.1 - 2016-04-06 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.0.0 - 2016-03-08 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 0.10.2 - 2016-02-11 (Source, Binaries) Apache Flink 0.10.1 - 2015-11-27 (Source, Binaries) Apache Flink 0.10.0 - 2015-11-16 (Source, Binaries) Apache Flink 0.9.1 - 2015-09-01 (Source, Binaries) Apache Flink 0.9.0 - 2015-06-24 (Source, Binaries) Apache Flink 0.9.0-milestone-1 - 2015-04-13 (Source, Binaries) Apache Flink 0.8.1 - 2015-02-20 (Source, Binaries) Apache Flink 0.8.0 - 2015-01-22 (Source, Binaries) Apache Flink 0.7.0-incubating - 2014-11-04 (Source, Binaries) Apache Flink 
0.6.1-incubating - 2014-09-26 (Source, Binaries) Apache Flink 0.6-incubating - 2014-08-26 (Source, Binaries) Apache Flink connectors # Flink Elasticsearch Connector 3.0.0 - 2022-11-09 (Source) Flink AWS Connectors 3.0.0 - 2022-11-28 (Source) Flink Cassandra Connector 3.0.0 - 2022-11-30 (Source) Flink AWS Connectors 4.0.0 - 2022-12-09 (Source) Flink Pulsar Connector 3.0.0 - 2022-12-20 (Source) Flink JDBC Connector 3.0.0 - 2022-11-30 (Source) Flink RabbitMQ Connectors 3.0.0 - 2022-12-13 (Source) Flink Opensearch Connector 1.0.0 - 2022-12-21 (Source) Flink Google Cloud PubSub Connector 3.0.0 - 2023-01-31 (Source) Flink MongoDB Connector 1.0.0 - 2023-04-03 (Source) Flink AWS Connectors 4.1.0 - 2023-04-03 (Source) Flink Kafka Connector 3.0.0 - 2023-04-21 (Source) Flink MongoDB Connector 1.0.1 - 2023-04-24 (Source) Flink JDBC Connector 3.1.0 - 2023-05-05 (Source) Flink RabbitMQ Connectors 3.0.1 - 2023-05-08 (Source) Flink Elasticsearch Connector 3.0.1 - 2023-05-08 (Source) Flink Opensearch Connector 1.0.1 - 2023-05-08 (Source) Flink Pulsar Connector 4.0.0 - 2023-05-08 (Source) Flink Google Cloud PubSub Connector 3.0.1 - 2023-05-09 (Source) Flink Cassandra Connector 3.1.0 - 2023-05-25 (Source) Flink Pulsar Connector 3.0.1 - 2023-06-07 (Source) Flink JDBC Connector 3.1.1 - 2023-06-28 (Source) Flink MongoDB Connector 1.0.2 - 2023-08-15 (Source) Flink HBase Connector 3.0.0 - 2023-09-1 (Source) Flink Kafka Connector 3.0.1 - 2023-10-30 (Source) Flink AWS Connectors 4.2.0 - 2023-11-30 (Source) Flink Kafka Connector 3.0.2 - 2023-12-01 (Source) Flink Pulsar Connector 4.1.0 - 2023-12-28 (Source) Flink Google Cloud PubSub Connector 3.0.2 - 2024-01-12 (Source) Flink Opensearch Connector 1.1.0 - 2024-02-01 (Source) Flink Kafka Connector 3.1.0 - 2024-02-07 (Source) Flink MongoDB Connector 1.1.0 - 2024-02-19 (Source) Flink JDBC Connector 3.1.2 - 2024-02-21 (Source) Flink MongoDB Connector 1.2.0 - 2024-06-06 (Source) Flink Google Cloud PubSub Connector 3.1.0 - 2024-06-07 (Source) Flink 
AWS Connectors 4.3.0 - 2024-06-07 (Source) Flink Cassandra Connector 3.2.0 - 2024-06-07 (Source) Flink JDBC Connector 3.2.0 - 2024-06-07 (Source) Flink Kafka Connector 3.2.0 - 2024-06-07 (Source) Flink Opensearch Connector 2.0.0 - 2024-06-11 (Source) Flink Opensearch Connector 1.2.0 - 2024-06-11 (Source) Flink Kafka Connector 3.3.0 - 2024-10-17 (Source) Flink Prometheus Connector 1.0.0 - 2024-11-08 (Source) Flink AWS Connectors 5.0.0 - 2024-11-11 (Source) Flink Kafka Connector 3.4.0 - 2024-11-25 (Source) Flink HBase Connector 4.0.0 - 2024-11-26 (Source) Flink Hive Connector 3.0.0 - 2025-02-10 (Source) Flink MongoDB Connector 2.0.0 - 2025-03-17 (Source) Flink Kudu Connector 2.0.0 - 2025-04-14 (Source) Flink Elasticsearch Connector 3.1.0 - 2025-04-15 (Source) Flink Elasticsearch Connector 4.0.0 - 2025-04-17 (Source) Flink JDBC Connector 3.3.0 - 2025-04-22 (Source) Flink JDBC Connector 4.0.0 - 2025-04-22 (Source) Flink Kafka Connector 4.0.0 - 2025-04-24 (Source) Apache Flink CDC # Apache Flink CDC 3.4.0 - 2025-05-16 (Source, Binaries) Apache Flink CDC 3.3.0 - 2025-01-21 (Source, Binaries) Apache Flink CDC 3.2.1 - 2024-11-27 (Source, Binaries) Apache Flink CDC 3.2.0 - 2024-09-05 (Source, Binaries) Apache Flink CDC 3.1.1 - 2024-06-18 (Source, Binaries) Apache Flink CDC 3.1.0 - 2024-05-17 (Source, Binaries) Apache Flink Stateful Functions # Apache Flink Stateful Functions 3.3.0 - 2023-09-19 (Source, Docs, Javadocs) Apache Flink Stateful Functions 3.2.0 - 2022-01-27 (Source, Docs, Javadocs) Apache Flink Stateful Functions 3.1.1 - 2021-12-22 (Source, Docs, Javadocs) Apache Flink Stateful Functions 3.1.0 - 2021-08-30 (Source, Docs, Javadocs) Apache Flink Stateful Functions 3.0.0 - 2021-04-14 (Source, Docs, Javadocs) Apache Flink Stateful Functions 2.2.2 - 2021-01-02 (Source, Docs, Javadocs) Apache Flink Stateful Functions 2.2.1 - 2020-11-09 (Source, Docs, Javadocs) Apache Flink Stateful Functions 2.2.0 - 2020-09-28 (Source, Docs, Javadocs) Apache Flink Stateful Functions 
2.1.0 - 2020-06-08 (Source, Docs, Javadocs) Apache Flink Stateful Functions 2.0.0 - 2020-04-02 (Source, Docs, Javadocs) Apache Flink Shaded # Apache Flink Shaded 20.0 - 2025-02-24 (Source) Apache Flink Shaded 19.0 - 2024-07-03 (Source) Apache Flink Shaded 18.0 - 2024-01-11 (Source) Apache Flink Shaded 17.0 - 2023-05-08 (Source) Apache Flink Shaded 16.2 - 2023-11-17 (Source) Apache Flink Shaded 16.1 - 2022-11-24 (Source) Apache Flink Shaded 16.0 - 2022-10-07 (Source) Apache Flink Shaded 15.0 - 2022-01-21 (Source) Apache Flink Shaded 14.0 - 2021-07-21 (Source) Apache Flink Shaded 13.0 - 2021-04-06 (Source) Apache Flink Shaded 12.0 - 2020-10-09 (Source) Apache Flink Shaded 11.0 - 2020-05-29 (Source) Apache Flink Shaded 10.0 - 2020-02-17 (Source) Apache Flink Shaded 9.0 - 2019-11-23 (Source) Apache Flink Shaded 8.0 - 2019-08-28 (Source) Apache Flink Shaded 7.0 - 2019-05-30 (Source) Apache Flink Shaded 6.0 - 2019-02-12 (Source) Apache Flink Shaded 5.0 - 2018-10-15 (Source) Apache Flink Shaded 4.0 - 2018-06-06 (Source) Apache Flink Shaded 3.0 - 2018-02-28 (Source) Apache Flink Shaded 2.0 - 2017-10-30 (Source) Apache Flink Shaded 1.0 - 2017-07-27 (Source) Apache Flink ML # Apache Flink ML 2.3.0 - 2023-07-01 (Source) Apache Flink ML 2.2.0 - 2023-04-19 (Source) Apache Flink ML 2.1.0 - 2022-07-12 (Source) Apache Flink ML 2.0.0 - 2021-01-07 (Source) Apache Flink Kubernetes Operator # Apache Flink Kubernetes Operator 1.12.1 - 2025-07-08 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.12.0 - 2025-05-28 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.11.0 - 2025-03-03 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.10.0 - 2024-10-25 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.9.0 - 2024-07-02 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.8.0 - 2024-03-21 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.7.0 - 2023-11-22 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.6.1 - 2023-10-27 (Source, Helm Chart) Apache Flink 
Kubernetes Operator 1.6.0 - 2023-08-15 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.5.0 - 2023-05-17 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.4.0 - 2023-02-22 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.3.1 - 2023-01-10 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.3.0 - 2022-12-14 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.2.0 - 2022-10-05 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.1.0 - 2022-07-25 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.0.1 - 2022-06-27 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.0.0 - 2022-06-04 (Source, Helm Chart) Apache Flink Kubernetes Operator 0.1.0 - 2022-04-02 (Source, Helm Chart) Apache Flink Table Store # Apache Flink Table Store 0.3.0 - 2023-01-13 (Source, Binaries) Apache Flink Table Store 0.2.0 - 2022-08-29 (Source, Binaries) Apache Flink Table Store 0.1.0 - 2022-05-11 (Source, Binaries) `}),e.add({id:13,href:"/documentation/flink-kubernetes-operator-master/",title:"Kubernetes Operator Main (snapshot)",section:"Documentation",content:" Flink Kubernetes Operator documentation (latest snapshot) # You can find the Flink Kubernetes Operator documentation for the latest snapshot here. "}),e.add({id:14,href:"/what-is-flink/powered-by/",title:"Powered By",section:"About",content:` Powered By Flink # Apache Flink powers business-critical applications in many companies and enterprises around the globe. On this page, we present a few notable Flink users that run interesting use cases in production and link to resources that discuss their applications in more detail.
+Apache Flink # Apache Flink 2.1.0 - 2025-07-31 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 2.0.0 - 2025-03-24 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.20.2 - 2025-07-10 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.20.1 - 2025-02-12 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.20.0 - 2024-08-02 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.19.3 - 2025-07-10 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.19.2 - 2025-02-12 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.19.1 - 2024-06-14 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.19.0 - 2024-03-18 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.18.1 - 2024-01-19 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.18.0 - 2023-10-25 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.17.2 - 2023-11-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.17.1 - 2023-05-25 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.17.0 - 2023-03-23 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.16.3 - 2023-11-20 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.16.2 - 2023-05-25 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.16.1 - 2023-01-30 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.16.0 - 2022-10-28 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.15.4 - 2023-03-15 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.15.3 - 2022-11-10 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.15.2 - 2022-08-24 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.15.1 - 2022-07-06 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.15.0 - 2022-05-05 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.14.6 - 2022-09-28 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.14.5 - 2022-06-22 (Source, Binaries, Docs, 
Javadocs, Scaladocs ) Apache Flink 1.14.4 - 2022-03-02 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.14.3 - 2022-01-17 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.14.2 - 2021-12-16 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.14.0 - 2021-09-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.6 - 2022-02-18 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.5 - 2021-12-16 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.3 - 2021-10-19 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.2 - 2021-08-02 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.1 - 2021-05-28 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.0 - 2021-04-30 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.7 - 2021-12-16 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.5 - 2021-08-06 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.4 - 2021-05-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.3 - 2021-04-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.2 - 2021-03-03 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.1 - 2021-01-19 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.0 - 2020-12-08 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.6 - 2021-12-16 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.4 - 2021-08-09 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.3 - 2020-12-18 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.2 - 2020-09-17 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.1 - 2020-07-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.0 - 2020-07-06 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.10.3 - 2021-01-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.10.2 - 2020-08-25 (Source, Binaries, Docs, 
Javadocs, Scaladocs ) Apache Flink 1.10.1 - 2020-05-12 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.10.0 - 2020-02-11 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.9.3 - 2020-04-24 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.9.2 - 2020-01-30 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.9.1 - 2019-10-18 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.9.0 - 2019-08-22 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.8.3 - 2019-12-11 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.8.2 - 2019-09-11 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.8.1 - 2019-07-02 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.8.0 - 2019-04-09 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.7.2 - 2019-02-15 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.7.1 - 2018-12-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.7.0 - 2018-11-30 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.6.4 - 2019-02-25 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.6.3 - 2018-12-22 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.6.2 - 2018-10-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.6.1 - 2018-09-19 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.6.0 - 2018-08-08 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.6 - 2018-12-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.5 - 2018-10-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.4 - 2018-09-19 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.3 - 2018-08-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.2 - 2018-07-31 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.1 - 2018-07-12 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.0 - 2018-05-25 (Source, Binaries, Docs, Javadocs, Scaladocs ) 
Apache Flink 1.4.2 - 2018-03-08 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.4.1 - 2018-02-15 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.4.0 - 2017-11-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.3.3 - 2018-03-15 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.3.2 - 2017-08-05 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.3.1 - 2017-06-23 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.3.0 - 2017-06-01 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.2.1 - 2017-04-26 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.2.0 - 2017-02-06 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.5 - 2017-03-22 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.4 - 2016-12-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.3 - 2016-10-13 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.2 - 2016-09-05 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.1 - 2016-08-11 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.0 - 2016-08-08 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.0.3 - 2016-05-12 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.0.2 - 2016-04-23 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.0.1 - 2016-04-06 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.0.0 - 2016-03-08 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 0.10.2 - 2016-02-11 (Source, Binaries) Apache Flink 0.10.1 - 2015-11-27 (Source, Binaries) Apache Flink 0.10.0 - 2015-11-16 (Source, Binaries) Apache Flink 0.9.1 - 2015-09-01 (Source, Binaries) Apache Flink 0.9.0 - 2015-06-24 (Source, Binaries) Apache Flink 0.9.0-milestone-1 - 2015-04-13 (Source, Binaries) Apache Flink 0.8.1 - 2015-02-20 (Source, Binaries) Apache Flink 0.8.0 - 2015-01-22 (Source, Binaries) Apache Flink 0.7.0-incubating - 2014-11-04 (Source, Binaries) Apache Flink 
0.6.1-incubating - 2014-09-26 (Source, Binaries) Apache Flink 0.6-incubating - 2014-08-26 (Source, Binaries) Apache Flink connectors # Flink Elasticsearch Connector 3.0.0 - 2022-11-09 (Source) Flink AWS Connectors 3.0.0 - 2022-11-28 (Source) Flink Cassandra Connector 3.0.0 - 2022-11-30 (Source) Flink AWS Connectors 4.0.0 - 2022-12-09 (Source) Flink Pulsar Connector 3.0.0 - 2022-12-20 (Source) Flink JDBC Connector 3.0.0 - 2022-11-30 (Source) Flink RabbitMQ Connectors 3.0.0 - 2022-12-13 (Source) Flink Opensearch Connector 1.0.0 - 2022-12-21 (Source) Flink Google Cloud PubSub Connector 3.0.0 - 2023-01-31 (Source) Flink MongoDB Connector 1.0.0 - 2023-04-03 (Source) Flink AWS Connectors 4.1.0 - 2023-04-03 (Source) Flink Kafka Connector 3.0.0 - 2023-04-21 (Source) Flink MongoDB Connector 1.0.1 - 2023-04-24 (Source) Flink JDBC Connector 3.1.0 - 2023-05-05 (Source) Flink RabbitMQ Connectors 3.0.1 - 2023-05-08 (Source) Flink Elasticsearch Connector 3.0.1 - 2023-05-08 (Source) Flink Opensearch Connector 1.0.1 - 2023-05-08 (Source) Flink Pulsar Connector 4.0.0 - 2023-05-08 (Source) Flink Google Cloud PubSub Connector 3.0.1 - 2023-05-09 (Source) Flink Cassandra Connector 3.1.0 - 2023-05-25 (Source) Flink Pulsar Connector 3.0.1 - 2023-06-07 (Source) Flink JDBC Connector 3.1.1 - 2023-06-28 (Source) Flink MongoDB Connector 1.0.2 - 2023-08-15 (Source) Flink HBase Connector 3.0.0 - 2023-09-1 (Source) Flink Kafka Connector 3.0.1 - 2023-10-30 (Source) Flink AWS Connectors 4.2.0 - 2023-11-30 (Source) Flink Kafka Connector 3.0.2 - 2023-12-01 (Source) Flink Pulsar Connector 4.1.0 - 2023-12-28 (Source) Flink Google Cloud PubSub Connector 3.0.2 - 2024-01-12 (Source) Flink Opensearch Connector 1.1.0 - 2024-02-01 (Source) Flink Kafka Connector 3.1.0 - 2024-02-07 (Source) Flink MongoDB Connector 1.1.0 - 2024-02-19 (Source) Flink JDBC Connector 3.1.2 - 2024-02-21 (Source) Flink MongoDB Connector 1.2.0 - 2024-06-06 (Source) Flink Google Cloud PubSub Connector 3.1.0 - 2024-06-07 (Source) Flink 
AWS Connectors 4.3.0 - 2024-06-07 (Source) Flink Cassandra Connector 3.2.0 - 2024-06-07 (Source) Flink JDBC Connector 3.2.0 - 2024-06-07 (Source) Flink Kafka Connector 3.2.0 - 2024-06-07 (Source) Flink Opensearch Connector 2.0.0 - 2024-06-11 (Source) Flink Opensearch Connector 1.2.0 - 2024-06-11 (Source) Flink Kafka Connector 3.3.0 - 2024-10-17 (Source) Flink Prometheus Connector 1.0.0 - 2024-11-08 (Source) Flink AWS Connectors 5.0.0 - 2024-11-11 (Source) Flink Kafka Connector 3.4.0 - 2024-11-25 (Source) Flink HBase Connector 4.0.0 - 2024-11-26 (Source) Flink Hive Connector 3.0.0 - 2025-02-10 (Source) Flink MongoDB Connector 2.0.0 - 2025-03-17 (Source) Flink Kudu Connector 2.0.0 - 2025-04-14 (Source) Flink Elasticsearch Connector 3.1.0 - 2025-04-15 (Source) Flink Elasticsearch Connector 4.0.0 - 2025-04-17 (Source) Flink JDBC Connector 3.3.0 - 2025-04-22 (Source) Flink JDBC Connector 4.0.0 - 2025-04-22 (Source) Flink Kafka Connector 4.0.0 - 2025-04-24 (Source) Apache Flink CDC # Apache Flink CDC 3.5.0 - 2025-09-26 (Source, Binaries) Apache Flink CDC 3.4.0 - 2025-05-16 (Source, Binaries) Apache Flink CDC 3.3.0 - 2025-01-21 (Source, Binaries) Apache Flink CDC 3.2.1 - 2024-11-27 (Source, Binaries) Apache Flink CDC 3.2.0 - 2024-09-05 (Source, Binaries) Apache Flink CDC 3.1.1 - 2024-06-18 (Source, Binaries) Apache Flink CDC 3.1.0 - 2024-05-17 (Source, Binaries) Apache Flink Stateful Functions # Apache Flink Stateful Functions 3.3.0 - 2023-09-19 (Source, Docs, Javadocs) Apache Flink Stateful Functions 3.2.0 - 2022-01-27 (Source, Docs, Javadocs) Apache Flink Stateful Functions 3.1.1 - 2021-12-22 (Source, Docs, Javadocs) Apache Flink Stateful Functions 3.1.0 - 2021-08-30 (Source, Docs, Javadocs) Apache Flink Stateful Functions 3.0.0 - 2021-04-14 (Source, Docs, Javadocs) Apache Flink Stateful Functions 2.2.2 - 2021-01-02 (Source, Docs, Javadocs) Apache Flink Stateful Functions 2.2.1 - 2020-11-09 (Source, Docs, Javadocs) Apache Flink Stateful Functions 2.2.0 - 2020-09-28 
(Source, Docs, Javadocs) Apache Flink Stateful Functions 2.1.0 - 2020-06-08 (Source, Docs, Javadocs) Apache Flink Stateful Functions 2.0.0 - 2020-04-02 (Source, Docs, Javadocs) Apache Flink Shaded # Apache Flink Shaded 20.0 - 2025-02-24 (Source) Apache Flink Shaded 19.0 - 2024-07-03 (Source) Apache Flink Shaded 18.0 - 2024-01-11 (Source) Apache Flink Shaded 17.0 - 2023-05-08 (Source) Apache Flink Shaded 16.2 - 2023-11-17 (Source) Apache Flink Shaded 16.1 - 2022-11-24 (Source) Apache Flink Shaded 16.0 - 2022-10-07 (Source) Apache Flink Shaded 15.0 - 2022-01-21 (Source) Apache Flink Shaded 14.0 - 2021-07-21 (Source) Apache Flink Shaded 13.0 - 2021-04-06 (Source) Apache Flink Shaded 12.0 - 2020-10-09 (Source) Apache Flink Shaded 11.0 - 2020-05-29 (Source) Apache Flink Shaded 10.0 - 2020-02-17 (Source) Apache Flink Shaded 9.0 - 2019-11-23 (Source) Apache Flink Shaded 8.0 - 2019-08-28 (Source) Apache Flink Shaded 7.0 - 2019-05-30 (Source) Apache Flink Shaded 6.0 - 2019-02-12 (Source) Apache Flink Shaded 5.0 - 2018-10-15 (Source) Apache Flink Shaded 4.0 - 2018-06-06 (Source) Apache Flink Shaded 3.0 - 2018-02-28 (Source) Apache Flink Shaded 2.0 - 2017-10-30 (Source) Apache Flink Shaded 1.0 - 2017-07-27 (Source) Apache Flink ML # Apache Flink ML 2.3.0 - 2023-07-01 (Source) Apache Flink ML 2.2.0 - 2023-04-19 (Source) Apache Flink ML 2.1.0 - 2022-07-12 (Source) Apache Flink ML 2.0.0 - 2021-01-07 (Source) Apache Flink Kubernetes Operator # Apache Flink Kubernetes Operator 1.12.1 - 2025-07-08 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.12.0 - 2025-05-28 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.11.0 - 2025-03-03 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.10.0 - 2024-10-25 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.9.0 - 2024-07-02 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.8.0 - 2024-03-21 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.7.0 - 2023-11-22 (Source, Helm Chart) Apache Flink Kubernetes 
Operator 1.6.1 - 2023-10-27 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.6.0 - 2023-08-15 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.5.0 - 2023-05-17 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.4.0 - 2023-02-22 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.3.1 - 2023-01-10 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.3.0 - 2022-12-14 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.2.0 - 2022-10-05 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.1.0 - 2022-07-25 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.0.1 - 2022-06-27 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.0.0 - 2022-06-04 (Source, Helm Chart) Apache Flink Kubernetes Operator 0.1.0 - 2022-04-02 (Source, Helm Chart) Apache Flink Table Store # Apache Flink Table Store 0.3.0 - 2023-01-13 (Source, Binaries) Apache Flink Table Store 0.2.0 - 2022-08-29 (Source, Binaries) Apache Flink Table Store 0.1.0 - 2022-05-11 (Source, Binaries) `}),e.add({id:13,href:"/documentation/flink-kubernetes-operator-master/",title:"Kubernetes Operator Main (snapshot)",section:"Documentation",content:" Flink Kubernetes Operator documentation (latest snapshot) # You can find the Flink Kubernetes Operator documentation for the latest snapshot here. "}),e.add({id:14,href:"/what-is-flink/powered-by/",title:"Powered By",section:"About",content:` Powered By Flink # Apache Flink powers business-critical applications in many companies and enterprises around the globe. On this page, we present a few notable Flink users that run interesting use cases in production and link to resources that discuss their applications in more detail.
 More Flink users are listed in the Powered by Flink directory in the project wiki. Please note that the list is not comprehensive. We only add users that explicitly ask to be listed.
 If you would you like to be included on this page, please reach out to the Flink user mailing list and let us know.
 Alibaba, the world&rsquo;s largest retailer, uses a fork of Flink called Blink to optimize search rankings in real time. Read more about Flink&rsquo;s role at Alibaba Amazon Managed Service for Apache Flink is a fully managed Amazon service that enables you to use an Apache Flink application to process and analyze streaming data. BetterCloud, a multi-SaaS management platform, uses Flink to surface near real-time intelligence from SaaS application activity. See BetterCloud at Flink Forward SF 2017 Bouygues Telecom is running 30 production applications powered by Flink and is processing 10 billion raw events per day. See Bouygues Telcom at Flink Forward 2016 Capital One, a Fortune 500 financial services company, uses Flink for real-time activity monitoring and alerting. Learn about Capital One&rsquo;s fraud detection use case Comcast, a global media and technology company, uses Flink for operationalizing machine learning models and near-real-time event stream processing. Learn about Flink at Comcast Criteo is the advertising platform for the open internet and uses Flink for real-time revenue monitoring and near-real-time event processing. Learn about Criteo&rsquo;s Flink use case Didi Chuxing (“DiDi”), the world&rsquo;s leading mobile transportation platform, uses Apache Flink for real-time monitoring, feature extraction, and ETL. Learn about Didi&rsquo;s Flink use case Drivetribe, a digital community founded by the former hosts of “Top Gear”, uses Flink for metrics and content recommendations. Read about Flink in the Drivetribe stack Ebay&rsquo;s monitoring platform is powered by Flink and evaluates thousands of customizable alert rules on metrics and log streams. Learn more about Flink at Ebay Ericsson used Flink to build a real-time anomaly detector with machine learning over large infrastructures. 
Read a detailed overview on O&rsquo;Reilly Ideas Gojek is a Super App: one app with over 20 services uses Flink to power their self-serve platform empowering data-driven decisions across functions. Read more on the Gojek engineering blog Huawei is a leading global provider of ICT infrastructure and smart devices. Huawei Cloud provides Cloud Service based on Flink. Learn about how Flink powers Cloud Service King, the creators of Candy Crush Saga, uses Flink to provide data science teams a real-time analytics dashboard. Learn more about King&rsquo;s Flink implementation Klaviyo leverages Apache Flink to scale its real-time analytics system that deduplicates and aggregates over a million events per second. Read about real-time analytics at Klaviyo Kuaishou, one of the leading short video sharing apps in China, uses Apache Flink to build a real-time monitoring platform for short videos and live streaming. Read about real-time monitoring at Kuaishou Lyft uses Flink as processing engine for its streaming platform, for example to consistently generate features for machine learning. Read more about Streaming at Lyft MediaMath, a programmatic marketing company, uses Flink to power its real-time reporting infrastructure. See MediaMath at Flink Forward SF 2017 Mux, an analytics company for streaming video providers, uses Flink for real-time anomaly detection and alerting. Read more about how Mux is using Flink OPPO, one of the largest mobile phone manufacturers in China, build a real-time data warehouse with Flink to analyze the effects of operating activities and short-term interests of users. Read more about how OPPO is using Flink Otto Group, the world&rsquo;s second-largest online retailer, uses Flink for business intelligence stream processing. See Otto at Flink Forward 2016 OVH leverages Flink to develop streaming-oriented applications such as real-time Business Intelligence or alerting systems. 
Read more about how OVH is using Flink Pinterest runs thousands of experiments every day on a platform for real-time experiment analytics that is based on Apache Flink. Read more about real-time experiment analytics at Pinterest Razorpay, one of India&rsquo;s largest payment gateways, built their in-house platform Mitra with Apache Flink to scale AI feature generation and model serving in real-time. Read more about data science with Flink at Razorpay ResearchGate, a social network for scientists, uses Flink for network analysis and near-duplicate detection. See ResearchGate at Flink Forward 2016 SK telecom is South Korea&rsquo;s largest wireless carrier and uses Flink for several applications including smart factory and mobility applications. Learn more about one of SK telecom&rsquo;s use cases Telefónica NEXT&rsquo;s TÜV-certified Data Anonymization Platform is powered by Flink. Read more about Telefónica NEXT Tencent, one of the largest Internet companies, built an in-house platform with Apache Flink to improve the efficiency of developing and operating real-time applications. Read more about Tencent&rsquo;s platform. Uber built their internal SQL-based, open-source streaming analytics platform AthenaX on Apache Flink. Read more on the Uber engineering blog Vip, one of the largest warehouse sale website for big brands in China, uses Flink to stream and ETL data into Apache Hive in real-time for data processing and analytics. Read more about Vip&rsquo;s story. Xiaomi, one of the largest electronics companies in China, built a platform with Flink to improve the efficiency of developing and operating real-time applications and use it in real-time recommendations. Learn more about how Xiaomi is using Flink. Yelp utilizes Flink to power its data connectors ecosystem and stream processing infrastructure. Find out more watching a Flink Forward talk Zalando, one of the largest e-commerce companies in Europe, uses Flink for real-time process monitoring and ETL. 
Read more on the Zalando Tech Blog `}),e.add({id:15,href:"/getting-started/with-flink-stateful-functions/",title:"With Flink Stateful Functions",section:"Getting Started",content:" Getting Started with Flink Stateful Functions # Read how you can get started with Flink Stateful Functions here. "}),e.add({id:16,href:"/documentation/flink-cdc-stable/",title:"CDC $FlinkCDCStableShortVersion (stable)",section:"Documentation",content:" Flink CDC documentation (latest stable release) # You can find the Flink CDC documentation for the latest stable release here. "}),e.add({id:17,href:"/what-is-flink/roadmap/",title:"Roadmap",section:"About",content:` Roadmap # Preamble: This roadmap means to provide users and contributors with a high-level summary of ongoing efforts, grouped by the major threads to which the efforts belong. With so much that is happening in Flink, we hope that this helps with understanding the direction of the project. The roadmap contains both efforts in early stages as well as nearly completed efforts, so that users may get a better impression of the overall status and direction of those developments.
@@ -486,7 +491,12 @@
 Please refer to the Configuration Reference for details.
 My job fails with various exceptions from the HDFS/Hadoop code. What can I do? # The most common cause for that is that the Hadoop version in Flink&rsquo;s classpath is different than the Hadoop version of the cluster you want to connect to (HDFS / YARN).
 The easiest way to fix that is to pick a Hadoop-free Flink version and simply export the Hadoop path and classpath from the cluster.
-`}),e.add({id:38,href:"/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/",title:"Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data + AI with Comprehensive Upgrades",section:"Flink Blog",content:`The Apache Flink PMC is proud to announce the release of Apache Flink 2.1.0. This marks a significant milestone in the evolution of the real-time data processing engine into a unified Data + AI platform. This release brings together 116 global contributors, implements 16 FLIPs (Flink Improvement Proposals), and resolves over 220 issues, with a strong focus on deepening the integration of real-time AI and intelligent stream processing:
+`}),e.add({id:38,href:"/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/",title:"Apache Flink CDC 3.5.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is excited to announce the release of Flink CDC 3.5.0!
+This release introduces new pipeline connectors for Apache Fluss and PostgreSQL, and improves usability in multi-tables (with frequent table structure changes) synchronization scenario, many issues encountered in the transform and Schema evolution frameworks have also been fixed.
+Flink CDC release packages are available at Releases Page, and documentations are available at Flink CDC documentation page. Looking forward to any feedback from the community through the Flink mailing lists or JIRA!
+Highlights # Pipeline Core # Schema Evolution Optimization # [FLINK-38045] During job failover, reissue the schema information stored in the state of Source to enhance the correctness of handling schema changes and make transform operator stateless. [FLINK-38243][FLINK-38244] Properly handle schema evolution for case-sensitive table and column names. Transform Enhancement # FLINK-38079] Enhance precision support for DATE and TIME types to improve temporal handling in built-in and user-defined functions. Incremental Source Framework # [FLINK-38265] Properly design the termination logic for stream split to prevent exceptions causing the job to get stuck. Pipeline Connectors # Apache Fluss (newly added) # [FLINK-37958] Apache Fluss (Incubating) is a streaming storage built for real-time analytics which can serve as the real-time data layer for Lakehouse architectures. In this version, Fluss is supported to be the sink for Pipeline jobs. PostgreSQL (newly added) # [FLINK-35670] PostgreSQL is a powerful, open source object-relational database system with over 35 years of active development that has earned it a strong reputation for reliability, feature robustness, and performance. In this version, PostgreSQL is supported to be the source for Pipeline jobs. Apache Paimon # [FLINK-38142] Bump Paimon version to 1.2.0. [FLINK-38206] Support writing to existed table with inconsistent schema with upstream. [FLINK-37824] Support column comments when creating a new table. Source Connectors # MySQL CDC # [FLINK-37065] Fix potential data loss caused by GTID out-of-order scenarios. [FLINK-38238] Add support for processing table with varchar(0) column. PostgreSQL CDC # [FLINK-37479] Support discovering partitioned tables. [FLINK-37738] Support reading changelog as append only mode. OceanBase CDC # [FLINK-38111] Migrate OceanBase CDC Connector from LogProxy to OceanBase Binlog Service. 
List of Contributors # We would like to express gratitude to all the contributors working on this release:
+Sachin Mittal, suhwan, Lanny Boarts, hql0312, yuanoOo, gongzhongqiang, Kunni, North Lin, suntectec, SeungMin, ChengJie1053, kangzai, Ihor Mielientiev, Vinh Pham, wudi, Marta Paes, Shawn Huang, zhangchao.doovvv, linjc13, Sergei Morozov, MOBIN, Junbo Wang, junmuz, lvyanquan, Thorne, zhuxt2015, Xin Gong, linjianchang, tbpure, Tianzhu Wen, yuxiqian.yxq, Naci Simsek, Мухутдинов Артур, Hongshun Wang, proletarians, wangjunbo, Chao Zhang, ouyangwulin, Hang Ruan, Junbo wang, yuxiqian, wuzexian
+`}),e.add({id:39,href:"/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/",title:"Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data + AI with Comprehensive Upgrades",section:"Flink Blog",content:`The Apache Flink PMC is proud to announce the release of Apache Flink 2.1.0. This marks a significant milestone in the evolution of the real-time data processing engine into a unified Data + AI platform. This release brings together 116 global contributors, implements 16 FLIPs (Flink Improvement Proposals), and resolves over 220 issues, with a strong focus on deepening the integration of real-time AI and intelligent stream processing:
 Breakthroughs in Real-Time AI:
 Introduces AI Model DDL, enabling flexible management of AI models through Flink SQL and the Table API.
 Extends the ML_PREDICT Table-Valued Function (TVF), empowering real-time invocation of AI models within Flink SQL, laying the foundation for building end-to-end real-time AI workflows.
@@ -543,19 +553,19 @@
 FLINK-37760 Upgrade Notes # The Flink community tries to ensure that upgrades are as seamless as possible. However, certain changes may require users to make adjustments to certain parts of the program when upgrading to version 2.1. Please refer to the release notes for a comprehensive list of adjustments to make and issues to check during the upgrading process.
 List of Contributors # The Apache Flink community would like to express gratitude to all the contributors who made this release possible:
 Ahmed Hamdy, Alan Sheinberg, Aleksandr Iushmanov, Aleksandr Savonin, AlexYinHan, Ammu Parvathy, Anupam Aggarwal, Ao Li, Arvid Heise, Au-Miner, Benchao Li, Bonnie Varghese, Chris, David Moravek, David Radley, David Wang, Dawid Wysakowicz, Dian Fu, Efrat Levitan, Feng Jin, Ferenc Csaky, Francesco Di Chiara, Gabor Somogyi, Gunnar Morling, Gustavo de Morais, Hangxiang Yu, Hao Li, Hongjia Liang, HuangXingBo, Jiaan Geng, Jiabao Sun, Jiangjie (Becket) Qin, Joery, JunRuiLee, Junrui Lee, Juntao Zhang, Kunni, Kurt Ostfeld, Laffery, Lukas Schwerdtfeger, Luke Chen, Martijn Visser, Mate Czagany, Matthias Pohl, Mika Naylor, Mina Asham, Mingliang Liu, Muhammet Orazov, Márton Balassi, PB, Pan Yuepeng, Peter Huang, Piotr Nowojski, Roc Marshal, Rui Fan, Ryan van Huuksloot, Sasaki Toru, Sergey Nuyanzin, Shengkai, Shuyi Chen, Stepan Stepanishchev, Thomas Cooper, Tianzhu Wen, Timo Walther, Venkata krishnan Sowrirajan, Weijie Guo, Xiangyu Feng, Xu Huang, XuShuai, Xuannan, Xuyang, Yanfei Lei, Yi Zhang, Yuepeng Pan, Yun Tang, Zakelly, Zdenek Tison, Zhanghao Chen, atu-sharm, beliefer, big face cat, chenyuzhi459, fengli, fredia, gengbiao.gb, glorinli, hejufang, huangyanyanyan, jingge, lincoln lee, mayuehappy, moses, mzzx, nacisimsek, nilmadhab mondal, noorall, novakov-alexey, r-sidd, slankka, slfan1989, sunxia, sxnan, wangfeifan, wangqh, wangxinglong, xiangyu0xf, xiaoyu, xingbo, xuyang, yanand0909, yhx, yuhang2.zhang, yunfengzhou-hub, 余良, 皆非, 马越
-`}),e.add({id:39,href:"/2025/07/10/apache-flink-1.19.3-release-announcement/",title:"Apache Flink 1.19.3 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the third bug fix release of the Flink 1.19 series.
+`}),e.add({id:40,href:"/2025/07/10/apache-flink-1.19.3-release-announcement/",title:"Apache Flink 1.19.3 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the third bug fix release of the Flink 1.19 series.
 This release includes 14 bug fixes, vulnerability fixes, and minor improvements for Flink 1.19. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
 We highly recommend all users upgrade to Flink 1.19.3 or higher.
 Note: This release contains an improvement regarding how the Adaptive Scheduler works by default on Kubernetes deployments. Beforehand, in case of a downscaling, the Adaptive Scheduler was not striving to minimize the active TaskManager number, and free up resources even if it would have been possible. This logic is now changed and by default the Adaptive Scheduler will try to maximize TaskManager utilization during a downscale event to be able to minimize the necessary TaskManager number.
 Although this behavioral change pushes the boundaries of a patch release, the community agreed that the applied default is expected in the vast majority of use-cases. To keep the previous strategy, make sure you set the jobmanager.adaptive-scheduler.prefer-minimal-taskmanagers configuration option to false. For more details, please see FLINK-33977.
 Release Artifacts # Maven Dependencies # &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.19.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java&lt;/artifactId&gt; &lt;version&gt;1.19.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients&lt;/artifactId&gt; &lt;version&gt;1.19.3&lt;/version&gt; &lt;/dependency&gt; Binaries # You can find the binaries on the updated Downloads page.
-Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.19.3 Release Notes # Release Notes - Flink - Version 1.19.3 Bug [FLINK-34227] - Job doesn&#39;t disconnect from ResourceManager [FLINK-36549] - Using the ignore-parse-errors parameter in Debezium/Canal/Maxwell/Ogg JSON results in unexpected data loss. [FLINK-37505] - Pyflink is not able to handle the new YAML based configs [FLINK-37605] - SinkWriter may incorrectly infer end of input during rescale [FLINK-37609] - Bump parquet libs to 1.15.1 [FLINK-37760] - Bump parquet version to 1.15.2 [FLINK-37783] - TieredStorage doesn&#39;t work when Buffer Debloating is enabled [FLINK-37803] - LocalTime without seconds is incorrectly serialized to SQL as a value literal [FLINK-37820] - AsyncScalarFunction UDFs cannot be loaded via CompiledPlan [FLINK-37833] - Code generated for binary key in BatchExecExchange causes incorrect shuffle [FLINK-37870] - Unaligned checkpoint is disabled for all connections unexpectedly Improvement [FLINK-33977] - Adaptive scheduler may not minimize the number of TMs during downscaling Technical Debt [FLINK-37361] - Update japicmp configuration post 1.19.2 [FLINK-37804] - Python failed to build wheels on macos `}),e.add({id:40,href:"/2025/07/10/apache-flink-1.20.2-release-announcement/",title:"Apache Flink 1.20.2 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the second bug fix release of the Flink 1.20 series.
+Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.19.3 Release Notes # Release Notes - Flink - Version 1.19.3 Bug [FLINK-34227] - Job doesn&#39;t disconnect from ResourceManager [FLINK-36549] - Using the ignore-parse-errors parameter in Debezium/Canal/Maxwell/Ogg JSON results in unexpected data loss. [FLINK-37505] - Pyflink is not able to handle the new YAML based configs [FLINK-37605] - SinkWriter may incorrectly infer end of input during rescale [FLINK-37609] - Bump parquet libs to 1.15.1 [FLINK-37760] - Bump parquet version to 1.15.2 [FLINK-37783] - TieredStorage doesn&#39;t work when Buffer Debloating is enabled [FLINK-37803] - LocalTime without seconds is incorrectly serialized to SQL as a value literal [FLINK-37820] - AsyncScalarFunction UDFs cannot be loaded via CompiledPlan [FLINK-37833] - Code generated for binary key in BatchExecExchange causes incorrect shuffle [FLINK-37870] - Unaligned checkpoint is disabled for all connections unexpectedly Improvement [FLINK-33977] - Adaptive scheduler may not minimize the number of TMs during downscaling Technical Debt [FLINK-37361] - Update japicmp configuration post 1.19.2 [FLINK-37804] - Python failed to build wheels on macos `}),e.add({id:41,href:"/2025/07/10/apache-flink-1.20.2-release-announcement/",title:"Apache Flink 1.20.2 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the second bug fix release of the Flink 1.20 series.
 This release includes 25 bug fixes, vulnerability fixes, and minor improvements for Flink 1.20. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
 We highly recommend all users upgrade to Flink 1.20.2 or higher.
 Note: This release contains an improvement regarding how the Adaptive Scheduler works by default on Kubernetes deployments. Beforehand, in case of a downscaling, the Adaptive Scheduler was not striving to minimize the active TaskManager number, and free up resources even if it had been possible. This logic is now changed and by default the Adaptive Scheduler will try to maximize TaskManager utilization during a downscale event to be able to minimize the necessary TaskManager number.
 Although this behavioral change pushes the boundaries of a patch release, the community agreed that the applied default is expected in the vast majority of use cases. To keep the previous strategy, make sure you set the jobmanager.adaptive-scheduler.prefer-minimal-taskmanagers configuration option to false. For more details, please see FLINK-33977.
 Release Artifacts # Maven Dependencies # &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.20.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java&lt;/artifactId&gt; &lt;version&gt;1.20.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients&lt;/artifactId&gt; &lt;version&gt;1.20.2&lt;/version&gt; &lt;/dependency&gt; Binaries # You can find the binaries on the updated Downloads page.
-Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.20.2 Release Notes # Release Notes - Flink - Version 1.20.2 Bug [FLINK-30687] - FILTER not effect in count(*) [FLINK-34227] - Job doesn&#39;t disconnect from ResourceManager [FLINK-35117] - AsyncScalarFunction has a dependency issue. [FLINK-35687] - JSON_QUERY should return a well formatted nested objects/arrays for ARRAY&lt;STRING&gt; [FLINK-36549] - Using the ignore-parse-errors parameter in Debezium/Canal/Maxwell/Ogg JSON results in unexpected data loss. [FLINK-37205] - Correct the state cache behavior during bump beam version [FLINK-37460] - Using State Processor API and Kafka Sink with Exactly once delivery leads to org.apache.kafka.common.errors.InvalidPidMappingException [FLINK-37480] - Jpicmp gives false positive [FLINK-37505] - Pyflink is not able to handle the new YAML based configs [FLINK-37545] - StackOverflowError when using MetricGroup in custom WatermarkStrategy [FLINK-37557] - ResolvedSchema#getPrimaryKeyIndexes does not filter for physical columns [FLINK-37605] - SinkWriter may incorrectly infer end of input during rescale [FLINK-37609] - Bump parquet libs to 1.15.1 [FLINK-37670] - Watermark alignment can deadlock job if there are no more splits to be assigned [FLINK-37760] - Bump parquet version to 1.15.2 [FLINK-37803] - LocalTime without seconds is incorrectly serialized to SQL as a value literal [FLINK-37820] - AsyncScalarFunction UDFs cannot be loaded via CompiledPlan [FLINK-37833] - Code generated for binary key in BatchExecExchange causes incorrect shuffle [FLINK-37870] - Unaligned checkpoint is disabled for all connections unexpectedly Improvement [FLINK-33977] - Adaptive scheduler may not minimize the number of TMs during downscaling [FLINK-37109] - Improve state processor API performance when reading keyed rocksdb state by allowing duplicates Technical Debt [FLINK-37241] - Remove Mockito dependency from StateBackendTestBase [FLINK-37360] - 
Update japicmp configuration post 1.20.1 [FLINK-37804] - Python failed to build wheels on macos [FLINK-37810] - update log4j to 2.24.3 to fix critical vulnerabilities `}),e.add({id:41,href:"/2025/06/03/apache-flink-kubernetes-operator-1.12.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.12.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink Kubernetes Operator 1.12.0! The version brings a number of important fixes and improvements to both core and autoscaler modules.
+Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.20.2 Release Notes # Release Notes - Flink - Version 1.20.2 Bug [FLINK-30687] - FILTER not effect in count(*) [FLINK-34227] - Job doesn&#39;t disconnect from ResourceManager [FLINK-35117] - AsyncScalarFunction has a dependency issue. [FLINK-35687] - JSON_QUERY should return a well formatted nested objects/arrays for ARRAY&lt;STRING&gt; [FLINK-36549] - Using the ignore-parse-errors parameter in Debezium/Canal/Maxwell/Ogg JSON results in unexpected data loss. [FLINK-37205] - Correct the state cache behavior during bump beam version [FLINK-37460] - Using State Processor API and Kafka Sink with Exactly once delivery leads to org.apache.kafka.common.errors.InvalidPidMappingException [FLINK-37480] - Jpicmp gives false positive [FLINK-37505] - Pyflink is not able to handle the new YAML based configs [FLINK-37545] - StackOverflowError when using MetricGroup in custom WatermarkStrategy [FLINK-37557] - ResolvedSchema#getPrimaryKeyIndexes does not filter for physical columns [FLINK-37605] - SinkWriter may incorrectly infer end of input during rescale [FLINK-37609] - Bump parquet libs to 1.15.1 [FLINK-37670] - Watermark alignment can deadlock job if there are no more splits to be assigned [FLINK-37760] - Bump parquet version to 1.15.2 [FLINK-37803] - LocalTime without seconds is incorrectly serialized to SQL as a value literal [FLINK-37820] - AsyncScalarFunction UDFs cannot be loaded via CompiledPlan [FLINK-37833] - Code generated for binary key in BatchExecExchange causes incorrect shuffle [FLINK-37870] - Unaligned checkpoint is disabled for all connections unexpectedly Improvement [FLINK-33977] - Adaptive scheduler may not minimize the number of TMs during downscaling [FLINK-37109] - Improve state processor API performance when reading keyed rocksdb state by allowing duplicates Technical Debt [FLINK-37241] - Remove Mockito dependency from StateBackendTestBase [FLINK-37360] - 
Update japicmp configuration post 1.20.1 [FLINK-37804] - Python failed to build wheels on macos [FLINK-37810] - update log4j to 2.24.3 to fix critical vulnerabilities `}),e.add({id:42,href:"/2025/06/03/apache-flink-kubernetes-operator-1.12.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.12.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink Kubernetes Operator 1.12.0! The version brings a number of important fixes and improvements to both core and autoscaler modules.
 We encourage you to download the release and share your experience with the community through the Flink mailing lists or JIRA! We&rsquo;re looking forward to your feedback!
 Highlights # Enhanced Error Visibility and Event Reporting # This release places a strong emphasis on improving the visibility and transparency of errors across the entire Flink Kubernetes Operator stack. These improvements aim to significantly enhance the user experience by making error diagnosis and resolution faster and more intuitive.
 Comprehensive Event Reporting: Events are now emitted across all critical stages of the job lifecycle-from controller errors within the operator itself, to failures during job startup, and even runtime exceptions within Flink jobs. This end-to-end observability ensures that users are promptly informed of any issues that occur.
@@ -568,7 +578,7 @@
 $ helm repo add flink-kubernetes-operator-1.12.0 https://archive.apache.org/dist/flink/flink-kubernetes-operator-1.12.0/ $ helm install flink-kubernetes-operator flink-kubernetes-operator-1.12.0/flink-kubernetes-operator --set webhook.create=false You can also find official Kubernetes Operator Docker images of the new version on Dockerhub.
 For more details, check the updated documentation and the release notes. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA.
 List of Contributors # Andrea, Daren, David Kornel, Eduardas Kazakas, Gyula Fora, Nick Caballero, Rodrigo, Santwana Verma, Thomas Weise, ctrlaltdilj, pchoudhury22, siddr
-`}),e.add({id:42,href:"/2025/05/16/apache-flink-cdc-3.4.0-release-announcement/",title:"Apache Flink CDC 3.4.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is excited to announce the release of Flink CDC 3.4.0! This release introduces a new pipeline Connector for Apache Iceberg, and provides support for batch execution mode, many issues encountered in the transform and Schema evolution frameworks have also been fixed.
+`}),e.add({id:43,href:"/2025/05/16/apache-flink-cdc-3.4.0-release-announcement/",title:"Apache Flink CDC 3.4.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is excited to announce the release of Flink CDC 3.4.0! This release introduces a new pipeline Connector for Apache Iceberg, and provides support for batch execution mode, many issues encountered in the transform and Schema evolution frameworks have also been fixed.
 Flink CDC release packages are available at Releases Page, and documentations are available at Flink CDC documentation page. Looking forward to any feedback from the community through the Flink mailing lists or JIRA!
 Highlights # Pipeline Core # Schema Evolution Optimization # Optimized the situation where a large number of CreateTableEvents were sent and processed when a job is started. Optimized the situation where it takes a long time to wait when processing multiple SchemaChangeEvents. Transform Enhancement # Transform arithmetic functions support parameters of null and more numerical types. Fix failure of adding a new column that has the same column name with source table using transform. Batch Execution Mode # We have introduced support for scenarios where only full data synchronization is performed without incremental data synchronization. Currently, users can use this feature by specifying execution.runtime-mode as BATCH in the pipeline.
 Yarn Application Mode # Users can submit job though Cli with command ./bin/flink-cdc.sh -t yarn-application to run job in YARN application mode.
@@ -576,13 +586,13 @@
 Iceberg sink. Iceberg is a high-performance format for huge analytic tables. Iceberg brings the reliability and simplicity of SQL tables to big data, while making it possible for engines like Spark, Trino, Flink, Presto, Hive and Impala to safely work with the same tables, at the same time. In this version, Iceberg is supported to be the downstream for Pipeline jobs. Apache Paimon # Bump Paimon version to 1.0.1. Add support for writing to Append Only table. Write full changelog to Paimon Sink. Performance optimization in Paimon Sink to reduce end-to-end checkpoint time. MySQL CDC # Support read changelog as append only mode for MySQL CDC connector. MySqlSnapshotSplitAssigner assign the ending chunk early to avoid out of memory error from TaskManager. Fix MySQL CDC captures common-prefix database accidentally when scan.binlog.newly-added-table option is enabled. MongoDB CDC # Support metadata &lsquo;row_kind&rsquo; virtual column for Mongo CDC Connector. Update Flink dependency to 1.20 in Flink CDC # Flink CDC version 3.4.0 supports Flink 1.19.x and 1.20.x.
 List of Contributors # We would like to express gratitude to all the contributors working on this release:
 911432, chenhongyu, ConradJam, Ferenc Csaky, gongzhongqiang, Hang Ruan, He Wang, hiliuxg, Hongshun Wang, Jason Zhang, Jiabao Sun, Junbo Wang, Jzjsnow, Kevin Caesar, Kevin Wang, Kunni, Leonard Xu, lidoudou1993, linjianchang, liuxiaodong, lvyanquan, lzshlzsh, MOBIN-F, moses, North Lin, Olivier, ouyangwulin, Petrichor, proletarians, qinghuanwang, Qingsheng Ren, Robin Moffatt, Runkang He, Sergei Morozov, Seung-Min Lee, Shawn Huang, stayrascal, Thorne, Timi, Umesh Dangat, Vincent-Woo, Vinh Pham, wenmo, Wink, wudi, Xin Gong, yohei yoshimuta, yuanoOo, yuxiqian, zhangzheng
-`}),e.add({id:43,href:"/2025/04/30/introducing-the-externalized-kudu-connector/",title:"Introducing the Externalized Kudu Connector",section:"Flink Blog",content:`We are pleased to announce the revival of a connector that makes it possible for Flink to interact with Apache Kudu. The original connector existed as part of the Apache Bahir project, which was moved into the attic. Despite this, we saw interest to keep the Kudu connector updated, hence the community agreed to externalize it as a standalone connector in accordance with the current connector development model. For more information about the externalization process, see FLIP-439.
+`}),e.add({id:44,href:"/2025/04/30/introducing-the-externalized-kudu-connector/",title:"Introducing the Externalized Kudu Connector",section:"Flink Blog",content:`We are pleased to announce the revival of a connector that makes it possible for Flink to interact with Apache Kudu. The original connector existed as part of the Apache Bahir project, which was moved into the attic. Despite this, we saw interest to keep the Kudu connector updated, hence the community agreed to externalize it as a standalone connector in accordance with the current connector development model. For more information about the externalization process, see FLIP-439.
 Highlights # The connector is built on the already existing Apache Bahir code. The existing DataStream connector is updated to Sink V2 API. New DataStream Source API connector implementation. The Table API source and sink connectors are now using the new Schema stack. The first released connector version is 2.0.0, and it supports Flink 1.19, and 1.20. DataStream Source API # The Source API implementation is a net new addition to the externalized connector, and it brings some interesting features. Although Kudu itself is a bounded source, the Kudu Source implementation supports to configure boundedness, and can run in CONTINUOUS_UNBOUNDED mode. In CONTINUOUS_UNBOUNDED mode, the source operates similarly to a Change Data Capture (CDC) system. When the job starts, it takes a snapshot of the source table and records the snapshot timestamp. After that, it performs periodic differential scans, capturing only the changes that occurred since the last scan. The frequency of these scans is determined by the .setDiscoveryPeriod(Duration) setting. The following example demonstrates how to stream data from a Kudu table, capturing updates every one minute.
 KuduSource&lt;Row&gt; source = KuduSource.&lt;Row&gt;builder() .setTableInfo(...) .setReaderConfig(...) .setRowResultConverter(new RowResultRowConverter()) .setBoundedness(Boundedness.CONTINUOUS_UNBOUNDED) .setDiscoveryPeriod(Duration.ofMinutes(1)) .build(); For more details and examples, see the DataStream connector documentation
 Table API Catalog # The connector includes a catalog implementation designed to manage metadata for your Kudu setup and facilitate table operations. With the Kudu catalog, you can access all existing Kudu tables directly through Flink SQL queries. Such catalog can be defined in Flink SQL, as part of the Java application, or via a YAML catalog descriptor as well. The below example shows a minimal example in Filnk SQL.
 CREATE CATALOG my_kudu_catalog WITH( &#39;type&#39; = &#39;kudu&#39;, &#39;masters&#39; = &#39;localhost:7051&#39;, &#39;default-database&#39; = &#39;default_database&#39; ); USE CATALOG my_kudu_catalog; For other Table API related topics and examples, see the Table API connector documentation
 Release Notes # Sub-task # [FLINK-34929] - Create &ldquo;flink-connector-kudu&rdquo; repository [FLINK-34930] - Move existing Kudu connector code from Bahir repo to dedicated repo [FLINK-34931] - Update Kudu DataStream connector to use Sink V2 [FLINK-35114] - Remove old Table API implementations, update Schema stack [FLINK-35350] - Add documentation for Kudu connector [FLINK-37389] - Add &ldquo;flink-sql-connector-kudu&rdquo; module New Feature # [FLINK-36855] - Implement Source API in Kudu connector [FLINK-37527] - Add KuduSource documentation [FLINK-37664] - Integrate Kudu connector docs Improvement # [FLINK-36839] - Update Kudu version to 1.17.1 [FLINK-37190] - Make Kudu FlushMode configurable in Flink SQL [FLINK-37230] - Consolidate Kudu connector table options [FLINK-37237] - Improve Kudu table creation based on Flink SQL CREATE TABLE List of Contributors # Ferenc Csaky, Martijn Visser, Marton Greber
-`}),e.add({id:44,href:"/2025/03/24/apache-flink-2.0.0-a-new-era-of-real-time-data-processing/",title:"Apache Flink 2.0.0: A new Era of Real-Time Data Processing",section:"Flink Blog",content:`Today, the Flink PMC is proud to announce the official release of Apache Flink 2.0.0! This marks the first release in the Flink 2.x series and is the first major release since Flink 1.0 launched nine years ago. This version is the culmination of two years of meticulous preparation and collaboration, signifying a new chapter in the evolution of Flink.
+`}),e.add({id:45,href:"/2025/03/24/apache-flink-2.0.0-a-new-era-of-real-time-data-processing/",title:"Apache Flink 2.0.0: A new Era of Real-Time Data Processing",section:"Flink Blog",content:`Today, the Flink PMC is proud to announce the official release of Apache Flink 2.0.0! This marks the first release in the Flink 2.x series and is the first major release since Flink 1.0 launched nine years ago. This version is the culmination of two years of meticulous preparation and collaboration, signifying a new chapter in the evolution of Flink.
 In this release, 165 contributors have come together to complete 25 FLIPs (Flink Improvement Proposals) and 369 issues. We extend our heartfelt gratitude to all contributors for their invaluable contributions to this milestone release!
 Over the past decade, Apache Flink has undergone transformative evolution. In the 1.0 era, Flink pioneered Stateful Computations over Data Streams, making end-to-end exactly-once stateful stream processing a reality. Today, real-time processing with sub-second latency has become a standard expectation. However, users of real-time computing now face new challenges that hinder broader adoption. The costs of real-time computing have remained prohibitively high, both in terms of expensive resource consumption and the steep learning curve required to master complex distributed stream processing concepts. These barriers limit the application of real-time computing across more diverse use cases. Meanwhile, the rapid emergence of modern trends such as cloud-native architectures, data lakes, and AI LLMs has introduced new requirements for real-time systems. In the 2.0 era, Flink is tackling these challenges head-on. By addressing these pain points, Flink aims to deliver more accessible and scalable real-time computing solutions, empowering organizations to fully embrace real-time capabilities across the entire spectrum of big data and AI applications. This new chapter represents Flink&rsquo;s commitment to making real-time computing more practical, efficient, and widely applicable than ever before.
 In the 2.0 release, Flink introduces several innovative features that address key challenges in real-time data processing and align with the growing demands of modern applications, including AI-driven workflows.
@@ -640,7 +650,7 @@
 /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex/attempts/:attempt
 /jobs/:jobid/vertices/:vertexid/subtasktimes
 /jobs/:jobid/vertices/:vertexid/taskmanagers
-/jobs/:jobid/taskmanagers/:taskmanagerid/log-url In their responses, the &ldquo;host&rdquo;, &ldquo;subtasks.host&rdquo; or &ldquo;taskmanagers.host&rdquo; property is removed. List of removed CLI options # sql-client.sh: -u,--update &lt;SQL update statement&gt; is removed flink-client: run-application action is removed: Please use run -t kubernetes-application to run Kubernetes Application mode `}),e.add({id:45,href:"/2025/03/03/apache-flink-kubernetes-operator-1.11.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.11.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink Kubernetes Operator 1.11.0! The version brings a number of important fixes and improvements to both core and autoscaler modules.
+/jobs/:jobid/taskmanagers/:taskmanagerid/log-url In their responses, the &ldquo;host&rdquo;, &ldquo;subtasks.host&rdquo; or &ldquo;taskmanagers.host&rdquo; property is removed. List of removed CLI options # sql-client.sh: -u,--update &lt;SQL update statement&gt; is removed flink-client: run-application action is removed: Please use run -t kubernetes-application to run Kubernetes Application mode `}),e.add({id:46,href:"/2025/03/03/apache-flink-kubernetes-operator-1.11.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.11.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink Kubernetes Operator 1.11.0! The version brings a number of important fixes and improvements to both core and autoscaler modules.
 We encourage you to download the release and share your experience with the community through the Flink mailing lists or JIRA! We&rsquo;re looking forward to your feedback!
 Highlights # Flink 2.0 Preview Support # The Flink Kubernetes Operator and Autoscaler 1.11.0 brings support for Flink 2.0 preview version. This should help users to try out and verify the latest features in Flink planned for the 2.0 release.
 apiVersion: flink.apache.org/v1beta1 kind: FlinkDeployment metadata: name: basic-example spec: image: flink:2.0 flinkVersion: v2_0 ... Make sure to update the operator CRD to be able to deploy Flink 2.0 pipelines.
@@ -657,26 +667,26 @@
 $ helm repo add flink-kubernetes-operator-1.11.0 https://archive.apache.org/dist/flink/flink-kubernetes-operator-1.11.0/ $ helm install flink-kubernetes-operator flink-kubernetes-operator-1.11.0/flink-kubernetes-operator --set webhook.create=false You can also find official Kubernetes Operator Docker images of the new version on Dockerhub.
 For more details, check the updated documentation and the release notes. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA.
 List of Contributors # Alan Zhang, Anupam Aggarwal, Dao Thanh Tung, Gunnar Morling, Gyula Fora, Keith Wall, Luca Castelli, Luke Chen, Matyas Orhidi, Maximilian Michels, Rui Fan, Sam Barker, Shuyi Chen, Thomas Cooper, big face cat, dsaisharath, fqaiser94, huyuanfeng, mateczagany, sharath1709, timsn, yangjf2019
-`}),e.add({id:46,href:"/2025/02/12/apache-flink-1.19.2-release-announcement/",title:"Apache Flink 1.19.2 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the second bug fix release of the Flink 1.19 series.
+`}),e.add({id:47,href:"/2025/02/12/apache-flink-1.19.2-release-announcement/",title:"Apache Flink 1.19.2 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the second bug fix release of the Flink 1.19 series.
 This release includes 73 bug fixes, vulnerability fixes, and minor improvements for Flink 1.19. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
 We highly recommend all users upgrade to Flink 1.19.2.
 Note: Flink uses Apache Pekko for RPC communication, which in turn depends on Netty. In a recent Pekko update, its Netty dependency was upgraded from Netty 3 to Netty 4, allowing Flink to drop Netty 3 entirely. This change is important and desireable because Netty 3 has been end-of-life for several years and has a large number of known security vulnerabilities (CVEs). The Flink community agreed to include this update in a patch release, despite minor differences in default memory allocation between Netty 3 and Netty 4. By default, Netty 4 allocates slightly more memory to improve performance. However, we believe this will have no impact on the vast majority of use cases.
 For a detailed overview of these differences and available configuration options, please refer to the relevant section of the Flink documentation.
 Release Artifacts # Maven Dependencies # &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.19.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java&lt;/artifactId&gt; &lt;version&gt;1.19.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients&lt;/artifactId&gt; &lt;version&gt;1.19.2&lt;/version&gt; &lt;/dependency&gt; Binaries # You can find the binaries on the updated Downloads page.
-Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.19.2 Release Notes # Release Notes - Flink - Version 1.19.2 Bug [FLINK-30899] - FileSystemTableSource with CSV format incorrectly selects fields if filtering for partition [FLINK-33117] - a wrong scala example in udfs page [FLINK-33192] - State memory leak in the Window Operator due to unregistered cleanup timer [FLINK-33571] - Bump json-path from 2.7.0 to 2.9.0 [FLINK-33936] - Outputting Identical Results in Mini-Batch Aggregation with Set TTL [FLINK-34252] - WatermarkAssignerOperator should not emit WatermarkStatus.IDLE under continuous data flow [FLINK-35069] - ContinuousProcessingTimeTrigger continuously registers timers in a loop at the end of the window [FLINK-35157] - Sources with watermark alignment get stuck once some subtasks finish [FLINK-35318] - incorrect timezone handling for TIMESTAMP_WITH_LOCAL_TIME_ZONE type during predicate pushdown [FLINK-35334] - Code generation: init method exceeds 64 KB when there is a long array field with Table API [FLINK-35498] - Unexpected argument name conflict error when do extract method params from udf [FLINK-35522] - The source task may get stuck after a failover occurs in batch jobs [FLINK-35531] - Avoid calling hsync in flush method in BaseHadoopFsRecoverableFsDataOutputStream [FLINK-35537] - Error when setting state.backend.rocksdb.compression.per.level [FLINK-35685] - Some metrics in the MetricStore are duplicated when increasing or decreasing task parallelism [FLINK-35699] - Incorrect Jackson shade through fabric8 in flink-kubernetes [FLINK-35721] - I found out that in the Flink SQL documentation it says that Double type cannot be converted to Boolean type, but in reality, it can. 
[FLINK-35731] - Sink V2 operator is mistakenly assumed always to be parallelism configured [FLINK-35750] - The latency marker metrics aren&#39;t updated after failover [FLINK-35764] - TimerGauge is incorrect when update is called during a measurement [FLINK-35786] - NPE in BlobServer / shutdownHook [FLINK-35833] - ArtifactFetchManager always creates artifact dir [FLINK-35885] - proctime aggregate window triggered by watermark [FLINK-35886] - Incorrect watermark idleness timeout accounting when subtask is backpressured/blocked [FLINK-35887] - Null Pointer Exception in TypeExtractor.isRecord when trying to provide type info for interface [FLINK-35935] - CREATE TABLE AS doesn&#39;t work with LIMIT [FLINK-35977] - Missing an import in datastream.md [FLINK-36000] - DynamicTableSink#Context&#39;s getTargetColumns should not return an array of zero length [FLINK-36116] - Javadocs aren&#39;t visible anymore for Flink 1.17 and above [FLINK-36260] - numBytesInLocal and numBuffersInLocal being reported as remote [FLINK-36287] - Sink with topologies should not participate in UC [FLINK-36318] - Fail to restore from 1.18 if LAG function is used [FLINK-36368] - Fix subtask management in CommittableCollector [FLINK-36379] - Improve (Global)Committer with UC disabled [FLINK-36405] - Fix startup issues due to Hive delegation token on kerberos clusters [FLINK-36417] - STATE_TTL doesn&#39;t work with WatermarkAssigner [FLINK-36421] - Missing fsync in FsCheckpointStreamFactory [FLINK-36451] - Kubernetes Application JobManager Potential Deadlock and TaskManager Pod Residuals [FLINK-36455] - Sink should commit everything on notifyCheckpointCompleted [FLINK-36511] - FlinkSecurityManager#checkExit StackOverFlow if haltOnSystemExit is enabled [FLINK-36530] - Not able to restore list state from S3 [FLINK-36533] - Fix detecting bind failure in case of Netty EPOLL transport [FLINK-36571] - Flink dashboard does not show Busy / Backpressure [FLINK-36788] - Add coverage for GlobalCommitter for 
SinkV2 [FLINK-36941] - Fix Doc for DATE_FORMAT [FLINK-37016] - NPE when ClusterEntrypoing is shut down before initialization [FLINK-37025] - Periodic SQL watermarks can travel back in time [FLINK-37084] - NullPointerException occurs during handling timer of window in PyFlink [FLINK-37168] - TimerRegistrationAction in unregisteredTimers is not cleaned up properly after the timers are registered [FLINK-37183] - Usrlib symlinks are not followed Improvement [FLINK-33730] - Update the compatibility table to only include last three released versions [FLINK-35353] - Translate &quot;Profiler&quot; page into Chinese [FLINK-35453] - StreamReader Charset fix with UTF8 in core files [FLINK-36593] - Upgrade io.airlift:aircompressor to mitigate CVE [FLINK-36643] - Upgrade aws-java-sdk-core to 1.12.779 to fix vulnerability [FLINK-36689] - [UI] Upgrade runtime web Angular framework and associated deps to latest version [FLINK-36716] - Address vulnerabilities in Flink UI [FLINK-37099] - Document possible Netty4 memory related configurations Technical Debt [FLINK-36420] - Upgrade org.apache.avro:avro to 1.11.4 [FLINK-36510] - Upgrade Pekko from 1.0.1 to 1.1.2 `}),e.add({id:47,href:"/2025/02/12/apache-flink-1.20.1-release-announcement/",title:"Apache Flink 1.20.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink 1.20 series.
+Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.19.2 Release Notes # Release Notes - Flink - Version 1.19.2 Bug [FLINK-30899] - FileSystemTableSource with CSV format incorrectly selects fields if filtering for partition [FLINK-33117] - a wrong scala example in udfs page [FLINK-33192] - State memory leak in the Window Operator due to unregistered cleanup timer [FLINK-33571] - Bump json-path from 2.7.0 to 2.9.0 [FLINK-33936] - Outputting Identical Results in Mini-Batch Aggregation with Set TTL [FLINK-34252] - WatermarkAssignerOperator should not emit WatermarkStatus.IDLE under continuous data flow [FLINK-35069] - ContinuousProcessingTimeTrigger continuously registers timers in a loop at the end of the window [FLINK-35157] - Sources with watermark alignment get stuck once some subtasks finish [FLINK-35318] - incorrect timezone handling for TIMESTAMP_WITH_LOCAL_TIME_ZONE type during predicate pushdown [FLINK-35334] - Code generation: init method exceeds 64 KB when there is a long array field with Table API [FLINK-35498] - Unexpected argument name conflict error when do extract method params from udf [FLINK-35522] - The source task may get stuck after a failover occurs in batch jobs [FLINK-35531] - Avoid calling hsync in flush method in BaseHadoopFsRecoverableFsDataOutputStream [FLINK-35537] - Error when setting state.backend.rocksdb.compression.per.level [FLINK-35685] - Some metrics in the MetricStore are duplicated when increasing or decreasing task parallelism [FLINK-35699] - Incorrect Jackson shade through fabric8 in flink-kubernetes [FLINK-35721] - I found out that in the Flink SQL documentation it says that Double type cannot be converted to Boolean type, but in reality, it can. 
[FLINK-35731] - Sink V2 operator is mistakenly assumed always to be parallelism configured [FLINK-35750] - The latency marker metrics aren&#39;t updated after failover [FLINK-35764] - TimerGauge is incorrect when update is called during a measurement [FLINK-35786] - NPE in BlobServer / shutdownHook [FLINK-35833] - ArtifactFetchManager always creates artifact dir [FLINK-35885] - proctime aggregate window triggered by watermark [FLINK-35886] - Incorrect watermark idleness timeout accounting when subtask is backpressured/blocked [FLINK-35887] - Null Pointer Exception in TypeExtractor.isRecord when trying to provide type info for interface [FLINK-35935] - CREATE TABLE AS doesn&#39;t work with LIMIT [FLINK-35977] - Missing an import in datastream.md [FLINK-36000] - DynamicTableSink#Context&#39;s getTargetColumns should not return an array of zero length [FLINK-36116] - Javadocs aren&#39;t visible anymore for Flink 1.17 and above [FLINK-36260] - numBytesInLocal and numBuffersInLocal being reported as remote [FLINK-36287] - Sink with topologies should not participate in UC [FLINK-36318] - Fail to restore from 1.18 if LAG function is used [FLINK-36368] - Fix subtask management in CommittableCollector [FLINK-36379] - Improve (Global)Committer with UC disabled [FLINK-36405] - Fix startup issues due to Hive delegation token on kerberos clusters [FLINK-36417] - STATE_TTL doesn&#39;t work with WatermarkAssigner [FLINK-36421] - Missing fsync in FsCheckpointStreamFactory [FLINK-36451] - Kubernetes Application JobManager Potential Deadlock and TaskManager Pod Residuals [FLINK-36455] - Sink should commit everything on notifyCheckpointCompleted [FLINK-36511] - FlinkSecurityManager#checkExit StackOverFlow if haltOnSystemExit is enabled [FLINK-36530] - Not able to restore list state from S3 [FLINK-36533] - Fix detecting bind failure in case of Netty EPOLL transport [FLINK-36571] - Flink dashboard does not show Busy / Backpressure [FLINK-36788] - Add coverage for GlobalCommitter for 
SinkV2 [FLINK-36941] - Fix Doc for DATE_FORMAT [FLINK-37016] - NPE when ClusterEntrypoing is shut down before initialization [FLINK-37025] - Periodic SQL watermarks can travel back in time [FLINK-37084] - NullPointerException occurs during handling timer of window in PyFlink [FLINK-37168] - TimerRegistrationAction in unregisteredTimers is not cleaned up properly after the timers are registered [FLINK-37183] - Usrlib symlinks are not followed Improvement [FLINK-33730] - Update the compatibility table to only include last three released versions [FLINK-35353] - Translate &quot;Profiler&quot; page into Chinese [FLINK-35453] - StreamReader Charset fix with UTF8 in core files [FLINK-36593] - Upgrade io.airlift:aircompressor to mitigate CVE [FLINK-36643] - Upgrade aws-java-sdk-core to 1.12.779 to fix vulnerability [FLINK-36689] - [UI] Upgrade runtime web Angular framework and associated deps to latest version [FLINK-36716] - Address vulnerabilities in Flink UI [FLINK-37099] - Document possible Netty4 memory related configurations Technical Debt [FLINK-36420] - Upgrade org.apache.avro:avro to 1.11.4 [FLINK-36510] - Upgrade Pekko from 1.0.1 to 1.1.2 `}),e.add({id:48,href:"/2025/02/12/apache-flink-1.20.1-release-announcement/",title:"Apache Flink 1.20.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink 1.20 series.
 This release includes 75 bug fixes, vulnerability fixes, and minor improvements for Flink 1.20. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
 We highly recommend all users upgrade to Flink 1.20.1.
 Note: Flink uses Apache Pekko for RPC communication, which in turn depends on Netty. In a recent Pekko update, its Netty dependency was upgraded from Netty 3 to Netty 4, allowing Flink to drop Netty 3 entirely. This change is important and desireable because Netty 3 has been end-of-life for several years and has a large number of known security vulnerabilities (CVEs). The Flink community agreed to include this update in a patch release, despite minor differences in default memory allocation between Netty 3 and Netty 4. By default, Netty 4 allocates slightly more memory to improve performance. However, we believe this will have no impact on the vast majority of use cases.
 For a detailed overview of these differences and available configuration options, please refer to the relevant section of the Flink documentation.
 Release Artifacts # Maven Dependencies # &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.20.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java&lt;/artifactId&gt; &lt;version&gt;1.20.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients&lt;/artifactId&gt; &lt;version&gt;1.20.1&lt;/version&gt; &lt;/dependency&gt; Binaries # You can find the binaries on the updated Downloads page.
-Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.20.1 Release Notes # Release Notes - Flink - Version 1.20.1 Bug [FLINK-29065] - Flink v1.15.1 contains netty(version:3.10.6). There are many vulnerabilities, like CVE-2021-21409 etc. please confirm these version and fix. thx [FLINK-29797] - can&#39;t run a job on yarn, if set fs.default-scheme [FLINK-30899] - FileSystemTableSource with CSV format incorrectly selects fields if filtering for partition [FLINK-33117] - a wrong scala example in udfs page [FLINK-33571] - Bump json-path from 2.7.0 to 2.9.0 [FLINK-34194] - Upgrade Flink CI Docker container to Ubuntu 22.04 [FLINK-35334] - Code generation: init method exceeds 64 KB when there is a long array field with Table API [FLINK-35721] - I found out that in the Flink SQL documentation it says that Double type cannot be converted to Boolean type, but in reality, it can. [FLINK-35764] - TimerGauge is incorrect when update is called during a measurement [FLINK-35766] - When the job contains many YieldingOperatorFactory instances, compiling the JobGraph hangs [FLINK-35833] - ArtifactFetchManager always creates artifact dir [FLINK-35885] - proctime aggregate window triggered by watermark [FLINK-35886] - Incorrect watermark idleness timeout accounting when subtask is backpressured/blocked [FLINK-35887] - Null Pointer Exception in TypeExtractor.isRecord when trying to provide type info for interface [FLINK-35935] - CREATE TABLE AS doesn&#39;t work with LIMIT [FLINK-35977] - Missing an import in datastream.md [FLINK-36000] - DynamicTableSink#Context&#39;s getTargetColumns should not return an array of zero length [FLINK-36116] - Javadocs aren&#39;t visible anymore for Flink 1.17 and above [FLINK-36125] - File not found exception on restoring state handles with file merging [FLINK-36173] - Invalid link in checkpoint documentation [FLINK-36227] - NullPointerException when starting flink with logback logger [FLINK-36260] - 
numBytesInLocal and numBuffersInLocal being reported as remote [FLINK-36287] - Sink with topologies should not participate in UC [FLINK-36318] - Fail to restore from 1.18 if LAG function is used [FLINK-36368] - Fix subtask management in CommittableCollector [FLINK-36379] - Improve (Global)Committer with UC disabled [FLINK-36405] - Fix startup issues due to Hive delegation token on kerberos clusters [FLINK-36417] - STATE_TTL doesn&#39;t work with WatermarkAssigner [FLINK-36421] - Missing fsync in FsCheckpointStreamFactory [FLINK-36451] - Kubernetes Application JobManager Potential Deadlock and TaskManager Pod Residuals [FLINK-36455] - Sink should commit everything on notifyCheckpointCompleted [FLINK-36511] - FlinkSecurityManager#checkExit StackOverFlow if haltOnSystemExit is enabled [FLINK-36530] - Not able to restore list state from S3 [FLINK-36533] - Fix detecting bind failure in case of Netty EPOLL transport [FLINK-36543] - Table API over windows cannot be string serialized [FLINK-36571] - Flink dashboard does not show Busy / Backpressure [FLINK-36642] - Table API expressions with several built-in functions are not correctly serialized to SQL [FLINK-36644] - TIMESTAMPDIFF can not be string serialized [FLINK-36654] - Decimal divide Integer reports Null pointer exception [FLINK-36714] - sstmerge/CompactionScheduler termination can be interrupted [FLINK-36788] - Add coverage for GlobalCommitter for SinkV2 [FLINK-36840] - Multiple time-related built-in functions cannot be called [FLINK-36856] - CollectSinkOperatorFactory is not respecting batch size and socket timeout configs [FLINK-36883] - Views should keep the time attributes of the query [FLINK-36941] - Fix Doc for DATE_FORMAT [FLINK-37016] - NPE when ClusterEntrypoing is shut down before initialization [FLINK-37025] - Periodic SQL watermarks can travel back in time [FLINK-37029] - Fix the materialized table docs of full mode cron expression [FLINK-37084] - NullPointerException occurs during handling timer of 
window in PyFlink [FLINK-37098] - Can not select time attribute from a view [FLINK-37168] - TimerRegistrationAction in unregisteredTimers is not cleaned up properly after the timers are registered [FLINK-37183] - Usrlib symlinks are not followed Improvement [FLINK-36021] - Delegating the responsibility for compression to every tier [FLINK-36127] - Support sorting watermark on flink web [FLINK-36593] - Upgrade io.airlift:aircompressor to mitigate CVE [FLINK-36643] - Upgrade aws-java-sdk-core to 1.12.779 to fix vulnerability [FLINK-36689] - [UI] Upgrade runtime web Angular framework and associated deps to latest version [FLINK-36716] - Address vulnerabilities in Flink UI [FLINK-37099] - Document possible Netty4 memory related configurations Technical Debt [FLINK-36420] - Upgrade org.apache.avro:avro to 1.11.4 [FLINK-36468] - Use Flink Preconditions util instead of Parquet [FLINK-36510] - Upgrade Pekko from 1.0.1 to 1.1.2 `}),e.add({id:48,href:"/2025/01/21/apache-flink-cdc-3.3.0-release-announcement/",title:"Apache Flink CDC 3.3.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is excited to announce the release of Flink CDC 3.3.0! This release introduces more features in transform and connectors and improve usability and stability of existing features.
+Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.20.1 Release Notes # Release Notes - Flink - Version 1.20.1 Bug [FLINK-29065] - Flink v1.15.1 contains netty(version:3.10.6). There are many vulnerabilities, like CVE-2021-21409 etc. please confirm these version and fix. thx [FLINK-29797] - can&#39;t run a job on yarn, if set fs.default-scheme [FLINK-30899] - FileSystemTableSource with CSV format incorrectly selects fields if filtering for partition [FLINK-33117] - a wrong scala example in udfs page [FLINK-33571] - Bump json-path from 2.7.0 to 2.9.0 [FLINK-34194] - Upgrade Flink CI Docker container to Ubuntu 22.04 [FLINK-35334] - Code generation: init method exceeds 64 KB when there is a long array field with Table API [FLINK-35721] - I found out that in the Flink SQL documentation it says that Double type cannot be converted to Boolean type, but in reality, it can. [FLINK-35764] - TimerGauge is incorrect when update is called during a measurement [FLINK-35766] - When the job contains many YieldingOperatorFactory instances, compiling the JobGraph hangs [FLINK-35833] - ArtifactFetchManager always creates artifact dir [FLINK-35885] - proctime aggregate window triggered by watermark [FLINK-35886] - Incorrect watermark idleness timeout accounting when subtask is backpressured/blocked [FLINK-35887] - Null Pointer Exception in TypeExtractor.isRecord when trying to provide type info for interface [FLINK-35935] - CREATE TABLE AS doesn&#39;t work with LIMIT [FLINK-35977] - Missing an import in datastream.md [FLINK-36000] - DynamicTableSink#Context&#39;s getTargetColumns should not return an array of zero length [FLINK-36116] - Javadocs aren&#39;t visible anymore for Flink 1.17 and above [FLINK-36125] - File not found exception on restoring state handles with file merging [FLINK-36173] - Invalid link in checkpoint documentation [FLINK-36227] - NullPointerException when starting flink with logback logger [FLINK-36260] - 
numBytesInLocal and numBuffersInLocal being reported as remote [FLINK-36287] - Sink with topologies should not participate in UC [FLINK-36318] - Fail to restore from 1.18 if LAG function is used [FLINK-36368] - Fix subtask management in CommittableCollector [FLINK-36379] - Improve (Global)Committer with UC disabled [FLINK-36405] - Fix startup issues due to Hive delegation token on kerberos clusters [FLINK-36417] - STATE_TTL doesn&#39;t work with WatermarkAssigner [FLINK-36421] - Missing fsync in FsCheckpointStreamFactory [FLINK-36451] - Kubernetes Application JobManager Potential Deadlock and TaskManager Pod Residuals [FLINK-36455] - Sink should commit everything on notifyCheckpointCompleted [FLINK-36511] - FlinkSecurityManager#checkExit StackOverFlow if haltOnSystemExit is enabled [FLINK-36530] - Not able to restore list state from S3 [FLINK-36533] - Fix detecting bind failure in case of Netty EPOLL transport [FLINK-36543] - Table API over windows cannot be string serialized [FLINK-36571] - Flink dashboard does not show Busy / Backpressure [FLINK-36642] - Table API expressions with several built-in functions are not correctly serialized to SQL [FLINK-36644] - TIMESTAMPDIFF can not be string serialized [FLINK-36654] - Decimal divide Integer reports Null pointer exception [FLINK-36714] - sstmerge/CompactionScheduler termination can be interrupted [FLINK-36788] - Add coverage for GlobalCommitter for SinkV2 [FLINK-36840] - Multiple time-related built-in functions cannot be called [FLINK-36856] - CollectSinkOperatorFactory is not respecting batch size and socket timeout configs [FLINK-36883] - Views should keep the time attributes of the query [FLINK-36941] - Fix Doc for DATE_FORMAT [FLINK-37016] - NPE when ClusterEntrypoing is shut down before initialization [FLINK-37025] - Periodic SQL watermarks can travel back in time [FLINK-37029] - Fix the materialized table docs of full mode cron expression [FLINK-37084] - NullPointerException occurs during handling timer of 
window in PyFlink [FLINK-37098] - Can not select time attribute from a view [FLINK-37168] - TimerRegistrationAction in unregisteredTimers is not cleaned up properly after the timers are registered [FLINK-37183] - Usrlib symlinks are not followed Improvement [FLINK-36021] - Delegating the responsibility for compression to every tier [FLINK-36127] - Support sorting watermark on flink web [FLINK-36593] - Upgrade io.airlift:aircompressor to mitigate CVE [FLINK-36643] - Upgrade aws-java-sdk-core to 1.12.779 to fix vulnerability [FLINK-36689] - [UI] Upgrade runtime web Angular framework and associated deps to latest version [FLINK-36716] - Address vulnerabilities in Flink UI [FLINK-37099] - Document possible Netty4 memory related configurations Technical Debt [FLINK-36420] - Upgrade org.apache.avro:avro to 1.11.4 [FLINK-36468] - Use Flink Preconditions util instead of Parquet [FLINK-36510] - Upgrade Pekko from 1.0.1 to 1.1.2 `}),e.add({id:49,href:"/2025/01/21/apache-flink-cdc-3.3.0-release-announcement/",title:"Apache Flink CDC 3.3.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is excited to announce the release of Flink CDC 3.3.0! This release introduces more features in transform and connectors and improve usability and stability of existing features.
 Flink CDC release packages are available at Releases Page, and documentations are available at Flink CDC documentation page. Looking forward to any feedback from the community through the Flink mailing lists or JIRA!
 Highlights # Connectors # New Pipeline Connectors # Flink CDC 3.3.0 introduces 2 new pipeline connectors:
 OceanBase sink MaxCompute sink MySQL # Support parsing gh-ost and pt-osc generated schema changes. Parse array-typed key index binlog created between 8.0.17 and 8.0.18 rightly. Support passing op_ts to meta field in Event. Support parsing the comments of table and column. Fixed deadlock after adding new tables. Fixed a bug that works with FLOAT type with precision. Apache Paimon # Apply default value options when apply add column change. Reuse sequence number when schema evolution happened. Fixed commit conflict problem in PaimonSink. Remove Catalog.ColumnAlreadyExistException when apply applyAddColumnEventWithPosition in paimon. Bump Paimon version to 0.9.0. Postgres # Support metadata &lsquo;op_type&rsquo; virtual column for Postgres CDC Connector. Improve PostgresDialect.discoverDataCollections to reduce the start time of Postgres CDC. Flink CDC Base # CDC framework split snapshot chunks asynchronously. Improve the ROW data deserialization performance of DebeziumEventDeserializationScheme. Allow applying Truncate &amp; Drop table to Doris/Paimon/Starrocks connectors. The flink-cdc-base module supports source metric statistics. Use fixed format for SnapshotSplit&rsquo;s splitId in all connectors. Merge result of data type BIGINT and DOUBLE is DOUBLE instead of STRING. Upgrade Flink compatibility to 1.19+ # Flink latest version have been updated to 1.20. Flink CDC version 3.3.0 will support Flink 1.19+ and drop supports for Flink 1.17.* and 1.18.* .
 Cdcup: quickly start a testing job # Flink CDC 3.3.0 comes with a cdc-up utility script to set up a data integration PoC pipeline and required environment easily. Follow the latest quickstart steps to get started.
 Transform # Support timestampdiff, timestampadd, unix_timestamp function. Support to add metadata columns for data in the meta fields of DataChangeEvent at transform. Support to convert delete events as insert events. Add &ldquo;OpType&rdquo; metadata column in transform. Support for AI Model Integration for Data Processing. Improve get source field value by column name in PreTransformProcessor. Deduce primary key column types to be NOT NULL. List of Contributors # We would like to express gratitude to all the contributors working on this release:
 ConradJam, Hang Ruan, Hongshun Wang, Jason Zhang, Junbo wang, Jzjsnow, Kunni, Leonard Xu, MOBIN, North Lin, Olivier, Petrichor, Robin Moffatt, Runkang He, Sergei Morozov, Seung-Min Lee, Shawn Huang, Thorne, Timi, Umesh Dangat, Wink, Xin Gong, hiliuxg, liuxiaodong, moses, ouyangwulin, stayrascal, wenmo, wudi, yuanoOo, yuxiqian, MOBIN-F, helloliuxg, jzjsnow, molin.lxd, wuzhiping, zhangchaoming.zcm
-`}),e.add({id:49,href:"/2024/12/05/introducing-the-new-prometheus-connector/",title:"Introducing the new Prometheus connector",section:"Flink Blog",content:`We are excited to announce a new sink connector that enables writing data to Prometheus (FLIP-312). This articles introduces the main features of the connector, and the reasoning behind design decisions.
+`}),e.add({id:50,href:"/2024/12/05/introducing-the-new-prometheus-connector/",title:"Introducing the new Prometheus connector",section:"Flink Blog",content:`We are excited to announce a new sink connector that enables writing data to Prometheus (FLIP-312). This articles introduces the main features of the connector, and the reasoning behind design decisions.
 This connector allows writing data to Prometheus using the Remote-Write push interface, which lets you write time-series data to Prometheus at scale.
 Motivations for a Prometheus connector # Prometheus is an efficient time-series database optimized for building real-time dashboards and alerts, typically in combination with Grafana or other visualization tools.
 Prometheus is commonly used for observability, to monitor compute resources, Kubernetes clusters, and applications. It can also be used to observe Flink clusters and jobs. The Flink Metric Reporter has exactly this purpose.
@@ -730,13 +740,13 @@
 However, validating well-formed input would come with a significant performance cost. It would require checking every Label with a regular expression, and checking the ordering of the list of Labels, on every single input record.
 Additionally, checking Sample ordering in the sink would not allow reordering, unless you introduce some form of longer windowing that would inevitably increase latency. If latency is not a problem, some form of reordering can be implemented by the user, upstream of the connector.
 List of Contributors # Lorenzo Nicora, Hong Teoh, Francisco Morillo, Karthi Thyagarajan
-`}),e.add({id:50,href:"/2024/11/27/apache-flink-cdc-3.2.1-release-announcement/",title:"Apache Flink CDC 3.2.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink CDC 3.2 series.
+`}),e.add({id:51,href:"/2024/11/27/apache-flink-cdc-3.2.1-release-announcement/",title:"Apache Flink CDC 3.2.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink CDC 3.2 series.
 The release contains fixes for several critical issues and improves compatibilities with Apache Flink. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
 We highly recommend all users to upgrade to Flink CDC 3.2.1.
 Release Notes # Sub-task # [FLINK-36221] Add specification about CAST &hellip; AS &hellip; built-in functions Bug # [FLINK-35980] Add transform test coverage in Integrated / E2e tests [FLINK-35982] Transform metadata config doesn&rsquo;t work if no projection block was provided [FLINK-35985] SUBSTRING function not available in transform rules [FLINK-36105] CDC pipeline job could not restore from state in Flink 1.20 [FLINK-36247] Potential transaction leak during MySQL snapshot phase [FLINK-36326] Newly added table failed in mysql pipeline connector [FLINK-36347] Using the offset obtained after a query transaction as a high watermark cannot ensure exactly-once semantics [FLINK-36375] Missing default value in AddColumnEvent/RenameColumnEvent [FLINK-36407] SchemaRegistry doesn&rsquo;t shutdown its underlying ExecutorService upon closing [FLINK-36408] MySQL pipeline connector could not work with FLOAT type with precision [FLINK-36461] YAML job failed to schema evolve with unmatched transform tables [FLINK-36474] YAML Table-merging route should accept more type widening cases [FLINK-36509] Fix &lsquo;Unsupported bucket mode: GLOBAL_DYNAMIC&rsquo; error in Paimon Pipeline Sink. 
[FLINK-36517] Duplicate commit the same datafile in Paimon Sink [FLINK-36560] Fix the issue of timestamp_ltz increasing by 8 hours in Paimon [FLINK-36572] The local time zone is wrongly set in StarRocks pipeline sink [FLINK-36596] YAML Pipeline fails to schema change with no projection fields specified [FLINK-36649] Oracle When reading via OracleIncrementalSource, the connection is occasionally closed [FLINK-36656] Flink CDC treats MySQL Sharding table with boolean type conversion error [FLINK-36681] Wrong chunks splitting query in incremental snapshot reading section in mysql cdc doc Improvement # [FLINK-35291] Improve the ROW data deserialization performance of DebeziumEventDeserializationScheme [FLINK-35592] MysqlDebeziumTimeConverter miss timezone convert to timestamp [FLINK-36052] add elasticsearch.md for elasticsearch pipeline connector [FLINK-36093] PreTransform operator wrongly filters out columns when multiple transform rules were defined [FLINK-36151] Add documentations for Schema Evolution related options [FLINK-36211] Shade kafka related package in Kafka Pipeline connector [FLINK-36214] Error log when building flink-cdc-pipeline-udf-examples from source code [FLINK-36541] Occasional met commit conflict problem in PaimonSink [FLINK-36565] Pipeline YAML should allow merging decimal with different precisions [FLINK-36678] The Flink CDC Yarn deployment mode document description is incorrect [FLINK-36750] Paimon connector would reuse sequence number when schema evolution happened Release Resources # The source artifacts and binaries are available on the Downloads page of the Flink website.
 For more details, check the updated documentation and the release notes. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA.
 List of Contributors # yuxiqian, Xin Gong, Hang Ruan, wudi, qg-lin, Timi988, lvyanquan, ConradJam, Runkang He, Junbo wang, MOBIN, Leonard Xu, Sergei Morozov, liuzeshan
-`}),e.add({id:51,href:"/2024/11/25/introducing-the-new-amazon-kinesis-data-stream-and-amazon-dynamodb-stream-sources/",title:"Introducing the new Amazon Kinesis Data Stream and Amazon DynamoDB Stream sources",section:"Flink Blog",content:`We are pleased to introduce updated versions of the Amazon Kinesis Data Stream and Amazon DynamoDB Stream sources. Built on the FLIP-27 source interface, these newer connectors introduce 7 new features and are compatible with Flink 2.0.
+`}),e.add({id:52,href:"/2024/11/25/introducing-the-new-amazon-kinesis-data-stream-and-amazon-dynamodb-stream-sources/",title:"Introducing the new Amazon Kinesis Data Stream and Amazon DynamoDB Stream sources",section:"Flink Blog",content:`We are pleased to introduce updated versions of the Amazon Kinesis Data Stream and Amazon DynamoDB Stream sources. Built on the FLIP-27 source interface, these newer connectors introduce 7 new features and are compatible with Flink 2.0.
 The new KinesisStreamsSource replaces the legacy FlinkKinesisConsumer; and the new DynamoDbStreamsSource replaces the legacy FlinkDynamoDBStreamsConsumer. The new connectors are available for Flink 1.19 onwards, and AWS Connector version 5.0.0 onwards. For more information, see the section on Dependencies.
 In this blogpost, we will dive into the motivation for the new source connectors, the improvements introduced, and provide migration guidance for users.
 Dependencies # Connector API Dependency Usage Amazon Kinesis Data Streams source DataStream
@@ -758,7 +768,7 @@
 The diagram below illustrates what could happen when records with a given ordering within the same partition key are written to the stream.
 Fig. 2 - Illustration of record distribution within a Kinesis Data Stream after a resharding operation. As we can see, to ensure that records from the pk2 are read in order, we need to ensure that the shards are read in order of Shard 0, then Shard 3, then Shard 6. This can be more easily understood as: All parent shards must be fully read before children shards can be read.
 The new KinesisStreamsSource ensures that parent shards are read completely before reading children shards, and so ensures that record ordering is maintained even after a resharding operation on the stream.
-`}),e.add({id:52,href:"/2024/10/25/apache-flink-kubernetes-operator-1.10.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.10.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink Kubernetes Operator 1.10.0!
+`}),e.add({id:53,href:"/2024/10/25/apache-flink-kubernetes-operator-1.10.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.10.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink Kubernetes Operator 1.10.0!
 The release includes several improvements to the autoscaler, and introduces a new Kubernetes custom resource called FlinkStateSnapshot to manage job snapshots. The process of job upgrades has also been enhanced which makes it possible to now use the last-state upgrade mode with session jobs.
 We encourage you to download the release and share your experience with the community through the Flink mailing lists or JIRA! We&rsquo;re looking forward to your feedback!
 Highlights # FlinkStateSnapshot # With this version comes also a new custom resource called FlinkStateSnapshot. This is used to describe savepoint or checkpoint for a Flink job. The savepoint/checkpoint fields found in FlinkDeployment and FlinkSessionJob status are therefore deprecated, and the operator will create new FlinkStateSnapshot resources for periodic, update and manual savepoints/checkpoints.
@@ -773,7 +783,7 @@
 $ helm repo add flink-kubernetes-operator-1.10.0 https://archive.apache.org/dist/flink/flink-kubernetes-operator-1.10.0/ $ helm install flink-kubernetes-operator flink-kubernetes-operator-1.10.0/flink-kubernetes-operator --set webhook.create=false You can also find official Kubernetes Operator Docker images of the new version on Dockerhub.
 For more details, check the updated documentation and the release notes. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA.
 List of Contributors # Angela Chen, Ferenc Csaky, Gyula Fora, Mate Czagany, Matyas Orhidi, Naresh Kumar Reddy Gaddam, Roc Marshal, Rui Fan, Sam Barker, Yuepeng Pan, big face cat, chenyuzhi459, kartik-3513, r-sidd, 阿洋
-`}),e.add({id:53,href:"/2024/10/23/preview-release-of-apache-flink-2.0/",title:"Preview Release of Apache Flink 2.0",section:"Flink Blog",content:`The Apache Flink community is actively preparing Flink 2.0, the first major release since Flink 1.0 launched 8 years ago. As a significant milestone, Flink 2.0 is set to introduce numerous innovative features and improvements, along with some compatibility-breaking changes. To facilitate early adaptation to these changes for our users and partner projects (e.g., connectors), and to offer a sneak peek into the exciting new features while gathering feedback, we are now providing a preview release of Flink 2.0.
+`}),e.add({id:54,href:"/2024/10/23/preview-release-of-apache-flink-2.0/",title:"Preview Release of Apache Flink 2.0",section:"Flink Blog",content:`The Apache Flink community is actively preparing Flink 2.0, the first major release since Flink 1.0 launched 8 years ago. As a significant milestone, Flink 2.0 is set to introduce numerous innovative features and improvements, along with some compatibility-breaking changes. To facilitate early adaptation to these changes for our users and partner projects (e.g., connectors), and to offer a sneak peek into the exciting new features while gathering feedback, we are now providing a preview release of Flink 2.0.
 NOTICE: Flink 2.0 Preview is not a stable release and should not be used in production environments. While this preview includes most of the breaking changes planned for Flink 2.0, the final release may still subject to additional modifications.
 Breaking Changes # API # The following sets of APIs have been completely removed.
 DataSet API. Please migrate to DataStream API, or Table API/SQL if applicable. See also How to Migrate from DataSet to DataStream. Scala DataStream and DataSet API. Please migrate to the Java DataStream API. SourceFuction, SinkFunction and Sink V1. Please migrate to Source and Sink V2. TableSoure and TableSink. Please migrate to DynamicTableSource and DynamicTableSink. See also User-defined Sources &amp; Sinks. TableSchema, TableColumn and Types. Please migrate to Schema, Column and DataTypes respectively. Some deprecated methods have been removed from DataStream API. See also the list of breaking programming APIs.
@@ -800,7 +810,7 @@
 /jobs/:jobid/vertices/:vertexid/subtasks/:subtaskindex/attempts/:attempt
 /jobs/:jobid/vertices/:vertexid/subtasktimes
 /jobs/:jobid/vertices/:vertexid/taskmanagers
-/jobs/:jobid/taskmanagers/:taskmanagerid/log-url In their responses, the &ldquo;host&rdquo;, &ldquo;subtasks.host&rdquo; or &ldquo;taskmanagers.host&rdquo; property is removed. List of removed CLI options # sql-client.sh: -u,--update &lt;SQL update statement&gt; is removed flink-client: run-application action is removed: Please use run -t kubernetes-application to run Kubernetes Application mode `}),e.add({id:54,href:"/2024/09/05/apache-flink-cdc-3.2.0-release-announcement/",title:"Apache Flink CDC 3.2.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is excited to announce the release of Flink CDC 3.2.0! This release aims to improve usability and stability of existing features, including transform and schema evolution. Also, backwards-compatible code and tests have been added to help users upgrade from previous CDC versions more smoothly.
+/jobs/:jobid/taskmanagers/:taskmanagerid/log-url In their responses, the &ldquo;host&rdquo;, &ldquo;subtasks.host&rdquo; or &ldquo;taskmanagers.host&rdquo; property is removed. List of removed CLI options # sql-client.sh: -u,--update &lt;SQL update statement&gt; is removed flink-client: run-application action is removed: Please use run -t kubernetes-application to run Kubernetes Application mode `}),e.add({id:55,href:"/2024/09/05/apache-flink-cdc-3.2.0-release-announcement/",title:"Apache Flink CDC 3.2.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is excited to announce the release of Flink CDC 3.2.0! This release aims to improve usability and stability of existing features, including transform and schema evolution. Also, backwards-compatible code and tests have been added to help users upgrade from previous CDC versions more smoothly.
 Flink CDC release packages are available at Releases Page, and documentations are available at Flink CDC documentation page. Looking forward to any feedback from the community through the Flink mailing lists or JIRA!
 Highlights # Connectors # New Pipeline Connectors # Flink CDC 3.2.0 introduces 1 new pipeline connector:
 Elasticsearch sink (verified on Elasticsearch 6.8, 7.10, and 8.12) Apache Paimon # Paimon connector has bumped its dependency to an up-to-date version and could benefit from all bug fixes and improvements. Also, there are other improvements including:
@@ -821,7 +831,7 @@
 Complex Routing Rules # Route operator has been improved to allow declaring more complicated route topologies:
 Routing one table to multiple sink tables (broadcasting) is supported now. One can define multiple parallel routing rules in batch with pattern replacing symbols. List of Contributors # We would like to express gratitude to all the contributors working on this release:
 ChengJie1053, ConradJam, FangXiangmin, GOODBOY008, Hang Ruan, He Wang, Hongshun Wang, Jiabao Sun, Joao Boto, Junbo wang, Kunni, Laffery, Leonard Xu, MOBIN, Muhammet Orazov, North Lin, PONYLEE, Paul Lin, Qingsheng Ren, SeungMin, Shawn Huang, Thorne, Wink, Xie Yi, Xin Gong, Zhongmin Qiao, Zmm, gong, gongzhongqiang, hk__lrzy, joyCurry30, lipl, lvyanquan, ouyangwulin, skylines, wuzexian, yanghuaiGit, yux, yuxiqian, 鼎昕
-`}),e.add({id:55,href:"/2024/08/02/announcing-the-release-of-apache-flink-1.20/",title:"Announcing the Release of Apache Flink 1.20",section:"Flink Blog",content:`The Apache Flink PMC is pleased to announce the release of Apache Flink 1.20.0. As usual, we are looking at a packed release with a wide variety of improvements and new features. Overall, 142 people contributed to this release completing 13 FLIPs and 300+ issues. Thank you!
+`}),e.add({id:56,href:"/2024/08/02/announcing-the-release-of-apache-flink-1.20/",title:"Announcing the Release of Apache Flink 1.20",section:"Flink Blog",content:`The Apache Flink PMC is pleased to announce the release of Apache Flink 1.20.0. As usual, we are looking at a packed release with a wide variety of improvements and new features. Overall, 142 people contributed to this release completing 13 FLIPs and 300+ issues. Thank you!
 Let&rsquo;s dive into the highlights.
 Standing on the Eve of Apache Flink 2.0 # Flink 1.0 was released eight years ago. Since several months, the community is actively planning and taking steps towards the next major release. The new 1.20 release is planned to be the last minor release before Flink 2.0, which is anticipated by the end of 2024.
 Start from Flink 1.19, the community has decided to officially deprecate multiple APIs that were approaching end of life for a while. In 1.20, we further sorted through all relevant APIs that might need to be replaced or deprecated to clear the way for the 2.0 release:
@@ -877,7 +887,7 @@
 FLINK-35461 FLINK-35473 Checkpointing Options Recovery Options State Backend Options State Changelog Options Latency-track Options Upgrade Notes # The Flink community tries to ensure that upgrades are as seamless as possible. However, certain changes may require users to make adjustments to certain parts of the program when upgrading to version 1.20. Please refer to the release notes for a comprehensive list of adjustments to make and issues to check during the upgrading process.
 List of Contributors # The Apache Flink community would like to express gratitude to all the contributors who made this release possible:
 Ahmed Hamdy, Alan Sheinberg, Aleksandr Pilipenko, Alexander Fedulov, Andrey Gaskov, Antonio Vespoli, Anupam Aggarwal, Barak Ben-Nathan, Benchao Li, Brad, Cheng Pan, Chesnay Schepler, DamonXue, Danny Cranmer, David Christle, David Moravek, David Schlosnagle, Dawid Wysakowicz, Dian Fu, Dmitriy Linevich, Elphas Toringepi, Emre Kartoglu, Fang Yong, Feng Jin, Ferenc Csaky, Frank Yin, Gabor Somogyi, Gyula Fora, HCTommy, Hangxiang Yu, Hanyu Zheng, Hao Li, Hong Liang Teoh, Hong Teoh, HuangXingBo, Jacky Lau, James Hughes, Jane Chan, Jeyhun Karimov, Jiabao Sun, Jim Hughes, Jing Ge, Jinzhong Li, JunRuiLee, Juntao Hu, JustinLee, Kartikey Pant, Kumar Mallikarjuna, Leonard Xu, Lorenzo Affetti, Luke Chen, Martijn Visser, Mason Chen, Matthias Pohl, Mingliang Liu, Panagiotis Garefalakis, Peter Huang, Peter Vary, Piotr Nowojski, Puneet Duggal, Qinghui Xu, Qingsheng Ren, Ravi Dutt Singh, Robert Metzger, Robert Young, Roc Marshal, Roman, Roman Boyko, Roman Khachatryan, Ron, Rui Fan, Ryan Skraba, Samrat, Sergey Nuyanzin, Shilun Fan, Stefan Richter, SuDewei, Timo Walther, Ufuk Celebi, Vincent Woo, Wang FeiFan, Weijie Guo, Wencong Liu, Wouter Zorgdrager, Xiangyu Feng, Xintong Song, Xuyang, Yanfei Lei, Yangze Guo, Yu Chen, Yubin Li, Yuepeng Pan, Yun Tang, Yuxin Tan, Zakelly, Zhanghao Chen, Zhen Wang, Zhenqiu Huang, Zhu Zhu, Zmm, ammar-master, anupamaggarwal, bvarghese1, caicancai, caodizhou, chenzihao, drymatini, dsaisharath, eason.qin, elon-X, fengli, gongzhongqiang, hejufang, jectpro7, jiangxin, liming.1018, lincoln lee, liuyongvs, lxliyou001, oleksandr.nitavskyi, plugatarev, rmoff, slfan1989, spoon-lz, sunxia, sxnan, sychen, wforget, xiaogang, xingbo, yebukong, yunfengzhou-hub, yunhong, zhouyisha, 马越
-`}),e.add({id:56,href:"/2024/07/02/apache-flink-kubernetes-operator-1.9.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.9.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink Kubernetes Operator 1.9.0!
+`}),e.add({id:57,href:"/2024/07/02/apache-flink-kubernetes-operator-1.9.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.9.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink Kubernetes Operator 1.9.0!
 The release includes many improvements to the autoscaler and standalone autoscaler, as well as memory optimizations to the operator. There was also a lot of progress made to translate documentation to Chinese.
 We encourage you to download the release and share your experience with the community through the Flink mailing lists or JIRA! We&rsquo;re looking forward to your feedback!
 Highlights # Operator Optimizations # Many improvements and fixes are included in this release to reduce overall memory usage of the operator including the introduction of jemalloc as the default memory allocator to reduce memory fragmentation.
@@ -891,17 +901,17 @@
 $ helm repo add flink-kubernetes-operator-1.9.0 https://archive.apache.org/dist/flink/flink-kubernetes-operator-1.9.0/ $ helm install flink-kubernetes-operator flink-kubernetes-operator-1.9.0/flink-kubernetes-operator --set webhook.create=false You can also find official Kubernetes Operator Docker images of the new version on Dockerhub.
 For more details, check the updated documentation and the release notes. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA.
 List of Contributors # Alexander Fedulov, Anupam Aggarwal, Cancai Cai, ConradJam, Ferenc Csaky, Gabor Somogyi, Gyula Fora, Marton Balassi, Matt Braymer-Hayes, Maximilian Michels, Márton Balassi, Naci Simsek, Rui Fan, Sergey Nuyanzin, Xin Hao, Yarden Shoham, caicancai, chenyuzhi459, gengbiao.gb, luismacosta, nicolas.fraison@datadoghq.com, soulzz, timsn, wenbingshen, zhou-jiang
-`}),e.add({id:57,href:"/2024/06/18/apache-flink-cdc-3.1.1-release-announcement/",title:"Apache Flink CDC 3.1.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink CDC 3.1 series.
+`}),e.add({id:58,href:"/2024/06/18/apache-flink-cdc-3.1.1-release-announcement/",title:"Apache Flink CDC 3.1.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink CDC 3.1 series.
 The release contains fixes for several critical issues and improves compatibilities with Apache Flink. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
 We highly recommend all users to upgrade to Flink CDC 3.1.1.
 Release Notes # Bug # [FLINK-34908] Mysql pipeline to doris and starrocks will lost precision for timestamp [FLINK-35149] Fix DataSinkTranslator#sinkTo ignoring pre-write topology if not TwoPhaseCommittingSink [FLINK-35294] Use source config to check if the filter should be applied in timestamp starting mode [FLINK-35301] Fix deadlock when loading driver classes [FLINK-35323] Only the schema of the first hit table is recorded when the source-table of the transformer hits multiple tables [FLINK-35415] CDC Fails to create sink with Flink 1.19 [FLINK-35430] ZoneId is not passed to DebeziumJsonSerializationSchema [FLINK-35464] Flink CDC 3.1 breaks operator state compatiblity [FLINK-35540] flink-cdc-pipeline-connector-mysql lost table which database and table with the same name Documentation improvement # [FLINK-35527] Polish quickstart guide &amp; clean stale links in docs [FLINK-35545] Miss 3.1.0 version in snapshot flink-cdc doc version list Release Resources # The source artifacts and binaries are available on the Downloads page of the Flink website.
 For more details, check the updated documentation and the release notes. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA.
 List of Contributors # Hongshun Wang, Jiabao Sun, North Lin, Qingsheng Ren, Wink, Xin Gong, gongzhongqiang, joyCurry30, yux, yuxiqian
-`}),e.add({id:58,href:"/2024/06/14/apache-flink-1.19.1-release-announcement/",title:"Apache Flink 1.19.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink 1.19 series.
+`}),e.add({id:59,href:"/2024/06/14/apache-flink-1.19.1-release-announcement/",title:"Apache Flink 1.19.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink 1.19 series.
 This release includes 44 bug fixes, vulnerability fixes, and minor improvements for Flink 1.19. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
 We highly recommend all users upgrade to Flink 1.19.1.
 Release Artifacts # Maven Dependencies # &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.19.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java&lt;/artifactId&gt; &lt;version&gt;1.19.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients&lt;/artifactId&gt; &lt;version&gt;1.19.1&lt;/version&gt; &lt;/dependency&gt; Binaries # You can find the binaries on the updated Downloads page.
-Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.19.1 Release Notes # Release Notes - Flink - Version 1.19.1 Bug [FLINK-26808] - [flink v1.14.2] Submit jobs via REST API not working after set web.submit.enable: false [FLINK-27741] - Fix NPE when use dense_rank() and rank() in over aggregation [FLINK-28693] - Codegen failed if the watermark is defined on a columnByExpression [FLINK-31223] - sql-client.sh fails to start with ssl enabled [FLINK-32513] - Job in BATCH mode with a significant number of transformations freezes on method StreamGraphGenerator.existsUnboundedSource() [FLINK-32828] - Partition aware watermark not handled correctly shortly after job start up from checkpoint or savepoint [FLINK-33798] - Automatically clean up rocksdb logs when the task failover. [FLINK-34379] - table.optimizer.dynamic-filtering.enabled lead to OutOfMemoryError [FLINK-34517] - environment configs ignored when calling procedure operation [FLINK-34616] - python dist doesn&#39;t clean when open method construct resource [FLINK-34725] - Dockerfiles for release publishing has incorrect config.yaml path [FLINK-34956] - The config type is wrong for Duration [FLINK-35089] - Two input AbstractStreamOperator may throw NPE when receiving RecordAttributes [FLINK-35097] - Table API Filesystem connector with &#39;raw&#39; format repeats last line [FLINK-35098] - Incorrect results for queries like &quot;10 &gt;= y&quot; on tables using Filesystem connector and Orc format [FLINK-35112] - Membership for Row class does not include field names [FLINK-35159] - CreatingExecutionGraph can leak CheckpointCoordinator and cause JM crash [FLINK-35169] - Recycle buffers to freeSegments before releasing data buffer for sort accumulator [FLINK-35217] - Missing fsync in FileSystemCheckpointStorage [FLINK-35351] - Restore from unaligned checkpoints with a custom partitioner fails. 
[FLINK-35358] - Breaking change when loading artifacts [FLINK-35429] - We don&#39;t need introduce getFlinkConfigurationOptions for SqlGatewayRestEndpointFactory#Context [FLINK-35554] - usrlib is not added to classpath when using containers Improvement [FLINK-34746] - Switching to the Apache CDN for Dockerfile [FLINK-34922] - Exception History should support multiple Global failures [FLINK-34955] - Upgrade commons-compress to 1.26.0 Technical Debt [FLINK-35532] - Prevent Cross-Site Authentication (XSA) attacks on Flink dashboard `}),e.add({id:59,href:"/2024/05/17/apache-flink-cdc-3.1.0-release-announcement/",title:"Apache Flink CDC 3.1.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink CDC 3.1.0! This is the first release after the community accepted the donation of Flink CDC as a sub-project of Apache Flink, with exciting new features such as transform and table merging. The eco-system of Flink CDC keeps expanding, including new Kafka and Paimon pipeline sinks and enhancement to existing connectors.
+Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.19.1 Release Notes # Release Notes - Flink - Version 1.19.1 Bug [FLINK-26808] - [flink v1.14.2] Submit jobs via REST API not working after set web.submit.enable: false [FLINK-27741] - Fix NPE when use dense_rank() and rank() in over aggregation [FLINK-28693] - Codegen failed if the watermark is defined on a columnByExpression [FLINK-31223] - sql-client.sh fails to start with ssl enabled [FLINK-32513] - Job in BATCH mode with a significant number of transformations freezes on method StreamGraphGenerator.existsUnboundedSource() [FLINK-32828] - Partition aware watermark not handled correctly shortly after job start up from checkpoint or savepoint [FLINK-33798] - Automatically clean up rocksdb logs when the task failover. [FLINK-34379] - table.optimizer.dynamic-filtering.enabled lead to OutOfMemoryError [FLINK-34517] - environment configs ignored when calling procedure operation [FLINK-34616] - python dist doesn&#39;t clean when open method construct resource [FLINK-34725] - Dockerfiles for release publishing has incorrect config.yaml path [FLINK-34956] - The config type is wrong for Duration [FLINK-35089] - Two input AbstractStreamOperator may throw NPE when receiving RecordAttributes [FLINK-35097] - Table API Filesystem connector with &#39;raw&#39; format repeats last line [FLINK-35098] - Incorrect results for queries like &quot;10 &gt;= y&quot; on tables using Filesystem connector and Orc format [FLINK-35112] - Membership for Row class does not include field names [FLINK-35159] - CreatingExecutionGraph can leak CheckpointCoordinator and cause JM crash [FLINK-35169] - Recycle buffers to freeSegments before releasing data buffer for sort accumulator [FLINK-35217] - Missing fsync in FileSystemCheckpointStorage [FLINK-35351] - Restore from unaligned checkpoints with a custom partitioner fails. 
[FLINK-35358] - Breaking change when loading artifacts [FLINK-35429] - We don&#39;t need introduce getFlinkConfigurationOptions for SqlGatewayRestEndpointFactory#Context [FLINK-35554] - usrlib is not added to classpath when using containers Improvement [FLINK-34746] - Switching to the Apache CDN for Dockerfile [FLINK-34922] - Exception History should support multiple Global failures [FLINK-34955] - Upgrade commons-compress to 1.26.0 Technical Debt [FLINK-35532] - Prevent Cross-Site Authentication (XSA) attacks on Flink dashboard `}),e.add({id:60,href:"/2024/05/17/apache-flink-cdc-3.1.0-release-announcement/",title:"Apache Flink CDC 3.1.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink CDC 3.1.0! This is the first release after the community accepted the donation of Flink CDC as a sub-project of Apache Flink, with exciting new features such as transform and table merging. The eco-system of Flink CDC keeps expanding, including new Kafka and Paimon pipeline sinks and enhancement to existing connectors.
 We&rsquo;d like to invite you to check out Flink CDC documentation and have a try on the quickstart tutorial to explore the world of Flink CDC. Also we encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA! We hope you like the new release and we’d be eager to learn about your experience with it.
 Highlights # Transformation Support in Pipeline # Flink CDC 3.1.0 introduces the ability of making transformations in the CDC pipeline. By incorporating a transform section within the YAML pipeline definitions, users can now easily apply a variety of transformations to data change event from source, including projections, calculations, and addition of constant columns, enhancing the effectiveness of data integration pipelines. Leveraging an SQL-like syntax for defining these transformations, the new feature ensures that users can quickly adapt to and utilize it.
 You can find examples of using transformations in the Flink CDC documentation.
@@ -916,7 +926,7 @@
 Db2 # Db2 CDC source is now migrated to the unified incremental snapshot framework.
 CLI # Flink CDC pipeline submission CLI now supports recovering a pipeline execution from a specific savepoint file by using command line argument --from-savepoint
 List of Contributors # Check Null, FocusComputing, GOODBOY008, Hang Ruan, He Wang, Hongshun Wang, Jiabao Sun, Kunni, L, Laffery, Leonard Xu, Muhammet Orazov, Paul Lin, PengFei Li, Qingsheng Ren, Qishang Zhong, Shawn Huang, Thorne, TorinJie, Xianxun Ye, Xin Gong, Yaroslav Tkachenko, e-mhui, gongzhongqiang, joyCurry30, kunni, lzshlzsh, qwding, shikai93, sky, skylines, wenmo, wudi, xleoken, xuzifu666, yanghuaiGit, yux, yuxiqian, 张田
-`}),e.add({id:60,href:"/2024/03/21/apache-flink-kubernetes-operator-1.8.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.8.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink Kubernetes Operator 1.8.0!
+`}),e.add({id:61,href:"/2024/03/21/apache-flink-kubernetes-operator-1.8.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.8.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink Kubernetes Operator 1.8.0!
 The release includes many improvements to the operator core, the autoscaler, and introduces new features like TaskManager memory auto-tuning.
 We encourage you to download the release and share your experience with the community through the Flink mailing lists or JIRA! We&rsquo;re looking forward to your feedback!
 Highlights # Flink Autotuning # We&rsquo;re excited to announce our latest addition to the autoscaling module: Flink Autotuning.
@@ -948,7 +958,7 @@
 $ helm repo add flink-kubernetes-operator-1.8.0 https://archive.apache.org/dist/flink/flink-kubernetes-operator-1.8.0/ $ helm install flink-kubernetes-operator flink-kubernetes-operator-1.8.0/flink-kubernetes-operator --set webhook.create=false You can also find official Kubernetes Operator Docker images of the new version on Dockerhub.
 For more details, check the updated documentation and the release notes. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA.
 List of Contributors # 1996fanrui, Alexander Fedulov, AncyRominus, Caican Cai, Cancai Cai, ConradJam, Domenic Bove, Dominik Dębowczyk, Gabor Somogyi, Guillaume Vauvert, Gyula Fora, Hao Xin, Jerry Wang, Justin Chen, Máté Czagány, Maximilian Michels, Peter Huang, Rui Fan, Ryan van Huuksloot, Samrat, Tony Garrard, Yang-LI-CS, ensctom, fengfei02, flashJd, Nicolas Fraison
-`}),e.add({id:61,href:"/2024/03/18/announcing-the-release-of-apache-flink-1.19/",title:"Announcing the Release of Apache Flink 1.19",section:"Flink Blog",content:`The Apache Flink PMC is pleased to announce the release of Apache Flink 1.19.0. As usual, we are looking at a packed release with a wide variety of improvements and new features. Overall, 162 people contributed to this release completing 33 FLIPs and 600+ issues. Thank you!
+`}),e.add({id:62,href:"/2024/03/18/announcing-the-release-of-apache-flink-1.19/",title:"Announcing the Release of Apache Flink 1.19",section:"Flink Blog",content:`The Apache Flink PMC is pleased to announce the release of Apache Flink 1.19.0. As usual, we are looking at a packed release with a wide variety of improvements and new features. Overall, 162 people contributed to this release completing 33 FLIPs and 600+ issues. Thank you!
 Let&rsquo;s dive into the highlights.
 Flink SQL Improvements # Custom Parallelism for Table/SQL Sources # Now in Flink 1.19, you can set a custom parallelism for performance tuning via the scan.parallelism option. The first available connector is DataGen (Kafka connector is on the way). Here is an example using SQL Client:
 -- set parallelism within the ddl CREATE TABLE Orders ( order_number BIGINT, price DECIMAL(32,2), buyer ROW&lt;first_name STRING, last_name STRING&gt;, order_time TIMESTAMP(3) ) WITH ( &#39;connector&#39; = &#39;datagen&#39;, &#39;scan.parallelism&#39; = &#39;4&#39; ); -- or set parallelism via dynamic table option SELECT * FROM Orders /*+ OPTIONS(&#39;scan.parallelism&#39;=&#39;4&#39;) */; More Information
@@ -1003,20 +1013,20 @@
 Migrate RuntimeContext#getExecutionConfig.isObjectReuseEnabled() to RuntimeContext#isObjectReuseEnabled org.apache.flink.api.common.functions.RichFunction#open(Configuration parameters) method has been deprecated and will be removed in future versions. Users are encouraged to migrate to the new RichFunction#open(OpenContext openContext). org.apache.flink.configuration.AkkaOptions is deprecated and replaced with RpcOptions. Upgrade Notes # The Flink community tries to ensure that upgrades are as seamless as possible. However, certain changes may require users to make adjustments to certain parts of the program when upgrading to version 1.19. Please refer to the release notes for a comprehensive list of adjustments to make and issues to check during the upgrading process.
 List of Contributors # The Apache Flink community would like to express gratitude to all the contributors who made this release possible:
 Adi Polak, Ahmed Hamdy, Akira Ajisaka, Alan Sheinberg, Aleksandr Pilipenko, Alex Wu, Alexander Fedulov, Archit Goyal, Asha Boyapati, Benchao Li, Bo Cui, Cheena Budhiraja, Chesnay Schepler, Dale Lane, Danny Cranmer, David Moravek, Dawid Wysakowicz, Deepyaman Datta, Dian Fu, Dmitriy Linevich, Elkhan Dadashov, Eric Brzezenski, Etienne Chauchot, Fang Yong, Feng Jiajie, Feng Jin, Ferenc Csaky, Gabor Somogyi, Gyula Fora, Hang Ruan, Hangxiang Yu, Hanyu Zheng, Hjw, Hong Liang Teoh, Hongshun Wang, HuangXingBo, Jack, Jacky Lau, James Hughes, Jane Chan, Jerome Gagnon, Jeyhun Karimov, Jiabao Sun, JiangXin, Jiangjie (Becket) Qin, Jim Hughes, Jing Ge, Jinzhong Li, JunRuiLee, Laffery, Leonard Xu, Lijie Wang, Martijn Visser, Marton Balassi, Matt Wang, Matthias Pohl, Matthias Schwalbe, Matyas Orhidi, Maximilian Michels, Mingliang Liu, Máté Czagány, Panagiotis Garefalakis, ParyshevSergey, Patrick Lucas, Peter Huang, Peter Vary, Piotr Nowojski, Prabhu Joseph, Pranav Sharma, Qingsheng Ren, Robin Moffatt, Roc Marshal, Rodrigo Meneses, Roman, Roman Khachatryan, Ron, Rui Fan, Ruibin Xing, Ryan Skraba, Samrat002, Sergey Nuyanzin, Shammon FY, Shengkai, Stefan Richter, SuDewei, TBCCC, Tartarus0zm, Thomas Weise, Timo Walther, Varun, Venkata krishnan Sowrirajan, Vladimir Matveev, Wang FeiFan, Weihua Hu, Weijie Guo, Wencong Liu, Xiangyu Feng, Xianxun Ye, Xiaogang Zhou, Xintong Song, XuShuai, Xuyang, Yanfei Lei, Yangze Guo, Yi Zhang, Yu Chen, Yuan Mei, Yubin Li, Yuepeng Pan, Yun Gao, Yun Tang, Yuxin Tan, Zakelly, Zhanghao Chen, Zhu Zhu, archzi, bvarghese1, caicancai, caodizhou, dongwoo6kim, duanyc, eason.qin, fengjiajie, fengli, gongzhongqiang, gyang94, hejufang, jiangxin, jiaoqingbo, jingge, lijingwei.5018, lincoln lee, liuyongvs, luoyuxia, mimaomao, murong00, polaris6, pvary, sharath1709, simplejason, sunxia, sxnan, tzy123-123, wangfeifan, wangzzu, xiangyu0xf, xiarui, xingbo, xuyang, yeming, yhx, yinhan.yh, yunfan123, yunfengzhou-hub, yunhong, yuxia Luo, yuxiang, zoudan, 周仁祥, 曹帝胄, 朱通通, 马越
-`}),e.add({id:62,href:"/2024/01/19/apache-flink-1.18.1-release-announcement/",title:"Apache Flink 1.18.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink 1.18 series.
+`}),e.add({id:63,href:"/2024/01/19/apache-flink-1.18.1-release-announcement/",title:"Apache Flink 1.18.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink 1.18 series.
 This release includes 47 bug fixes, vulnerability fixes, and minor improvements for Flink 1.18. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
 We highly recommend all users upgrade to Flink 1.18.1.
 Note: Users that have state compression should not migrate to 1.18.1 (nor 1.18.0) due to a critical bug that could lead to data loss. Please refer to FLINK-34063 for more information.
 Release Artifacts # Maven Dependencies # &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.18.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java&lt;/artifactId&gt; &lt;version&gt;1.18.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients&lt;/artifactId&gt; &lt;version&gt;1.18.1&lt;/version&gt; &lt;/dependency&gt; Binaries # You can find the binaries on the updated Downloads page.
-Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.18.1 Release Notes # Release Notes - Flink - Version 1.18.1 Bug [FLINK-31650] - Incorrect busyMsTimePerSecond metric value for FINISHED task [FLINK-33158] - Cryptic exception when there is a StreamExecSort in JsonPlan [FLINK-33171] - Consistent implicit type coercion support for equal and non-equal comparisons for codegen [FLINK-33223] - MATCH_RECOGNIZE AFTER MATCH clause can not be deserialised from a compiled plan [FLINK-33225] - Python API incorrectly passes \`JVM_ARGS\` as single argument [FLINK-33313] - RexNodeExtractor fails to extract conditions with binary literal [FLINK-33352] - OpenAPI spec is lacking mappings for discriminator properties [FLINK-33395] - The join hint doesn&#39;t work when appears in subquery [FLINK-33474] - ShowPlan throws undefined exception In Flink Web Submit Page [FLINK-33523] - DataType ARRAY&lt;INT NOT NULL&gt; fails to cast into Object[] [FLINK-33529] - PyFlink fails with &quot;No module named &#39;cloudpickle&quot; [FLINK-33541] - RAND_INTEGER can&#39;t be existed in a IF statement [FLINK-33567] - Flink documentation should only display connector downloads links when a connector is available [FLINK-33588] - Fix Flink Checkpointing Statistics Bug [FLINK-33613] - Python UDF Runner process leak in Process Mode [FLINK-33693] - Force aligned barrier logic doesn&#39;t work when the aligned checkpoint timeout is enabled [FLINK-33752] - When Duration is greater than or equal to 1 day, the display unit is ms. 
[FLINK-33793] - java.lang.NoSuchMethodError when checkpointing in Google Cloud Storage [FLINK-33872] - Checkpoint history does not display for completed jobs New Feature [FLINK-33071] - Log checkpoint statistics Improvement [FLINK-24819] - Higher APIServer cpu load after using SharedIndexInformer replaced naked Kubernetes watch [FLINK-32611] - Redirect to Apache Paimon&#39;s link instead of legacy flink table store [FLINK-33041] - Add an introduction about how to migrate DataSet API to DataStream [FLINK-33161] - [benchmark] Java17 profile for benchmarks [FLINK-33501] - Rely on Maven wrapper instead of having custom Maven installation logic [FLINK-33598] - Watch HA configmap via name instead of lables to reduce pressure on APIserver `}),e.add({id:63,href:"/2023/11/29/apache-flink-1.16.3-release-announcement/",title:"Apache Flink 1.16.3 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the third bug fix release of the Flink 1.16 series.
+Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.18.1 Release Notes # Release Notes - Flink - Version 1.18.1 Bug [FLINK-31650] - Incorrect busyMsTimePerSecond metric value for FINISHED task [FLINK-33158] - Cryptic exception when there is a StreamExecSort in JsonPlan [FLINK-33171] - Consistent implicit type coercion support for equal and non-equal comparisons for codegen [FLINK-33223] - MATCH_RECOGNIZE AFTER MATCH clause can not be deserialised from a compiled plan [FLINK-33225] - Python API incorrectly passes \`JVM_ARGS\` as single argument [FLINK-33313] - RexNodeExtractor fails to extract conditions with binary literal [FLINK-33352] - OpenAPI spec is lacking mappings for discriminator properties [FLINK-33395] - The join hint doesn&#39;t work when appears in subquery [FLINK-33474] - ShowPlan throws undefined exception In Flink Web Submit Page [FLINK-33523] - DataType ARRAY&lt;INT NOT NULL&gt; fails to cast into Object[] [FLINK-33529] - PyFlink fails with &quot;No module named &#39;cloudpickle&quot; [FLINK-33541] - RAND_INTEGER can&#39;t be existed in a IF statement [FLINK-33567] - Flink documentation should only display connector downloads links when a connector is available [FLINK-33588] - Fix Flink Checkpointing Statistics Bug [FLINK-33613] - Python UDF Runner process leak in Process Mode [FLINK-33693] - Force aligned barrier logic doesn&#39;t work when the aligned checkpoint timeout is enabled [FLINK-33752] - When Duration is greater than or equal to 1 day, the display unit is ms. 
[FLINK-33793] - java.lang.NoSuchMethodError when checkpointing in Google Cloud Storage [FLINK-33872] - Checkpoint history does not display for completed jobs New Feature [FLINK-33071] - Log checkpoint statistics Improvement [FLINK-24819] - Higher APIServer cpu load after using SharedIndexInformer replaced naked Kubernetes watch [FLINK-32611] - Redirect to Apache Paimon&#39;s link instead of legacy flink table store [FLINK-33041] - Add an introduction about how to migrate DataSet API to DataStream [FLINK-33161] - [benchmark] Java17 profile for benchmarks [FLINK-33501] - Rely on Maven wrapper instead of having custom Maven installation logic [FLINK-33598] - Watch HA configmap via name instead of lables to reduce pressure on APIserver `}),e.add({id:64,href:"/2023/11/29/apache-flink-1.16.3-release-announcement/",title:"Apache Flink 1.16.3 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the third bug fix release of the Flink 1.16 series.
 This release includes 52 bug fixes, vulnerability fixes, and minor improvements for Flink 1.16. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
 We highly recommend all users upgrade to Flink 1.16.3.
 Release Artifacts # Maven Dependencies # &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.16.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java&lt;/artifactId&gt; &lt;version&gt;1.16.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients&lt;/artifactId&gt; &lt;version&gt;1.16.3&lt;/version&gt; &lt;/dependency&gt; Binaries # You can find the binaries on the updated Downloads page.
-Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.16.3 Release Notes # Release Notes - Flink - Version 1.16.3 Bug [FLINK-32316] - Duplicated announceCombinedWatermark task maybe scheduled if jobmanager failover [FLINK-32362] - SourceAlignment announceCombinedWatermark period task maybe lost [FLINK-32411] - SourceCoordinator thread leaks when job recovers from checkpoint [FLINK-32414] - Watermark alignment will cause flink jobs to hang forever when any source subtask has no SourceSplit [FLINK-32496] - Sources with idleness and alignment always wait for alignment when part of multiple sources is idle [FLINK-27415] - Read empty csv file throws exception in FileSystem table connector [FLINK-28185] - &quot;Invalid negative offset&quot; when using OffsetsInitializer.timestamp(.) [FLINK-29913] - Shared state would be discarded by mistake when maxConcurrentCheckpoint&gt;1 [FLINK-30559] - May get wrong result for \`if\` expression if it&#39;s string data type [FLINK-30596] - Multiple POST /jars/:jarid/run requests with the same jobId, runs duplicate jobs [FLINK-30751] - Remove references to disableDataSync in RocksDB documentation [FLINK-30966] - Flink SQL IF FUNCTION logic error [FLINK-31139] - not upload empty state changelog file [FLINK-31967] - SQL with LAG function NullPointerException [FLINK-32023] - execution.buffer-timeout cannot be set to -1 ms [FLINK-32136] - Pyflink gateway server launch fails when purelib != platlib [FLINK-32172] - KafkaExample can not run with args [FLINK-32199] - MetricStore does not remove metrics of nonexistent parallelism in TaskMetricStore when scale down job parallelism [FLINK-32217] - Retain metric store can cause NPE [FLINK-32254] - FineGrainedSlotManager may not allocate enough taskmanagers if maxSlotNum is configured [FLINK-32296] - Flink SQL handle array of row incorrectly [FLINK-32548] - Make watermark alignment ready for production use [FLINK-32583] - RestClient can deadlock if 
request made after Netty event executor terminated [FLINK-32592] - (Stream)ExEnv#initializeContextEnvironment isn&#39;t thread-safe [FLINK-32655] - RecreateOnResetOperatorCoordinator did not forward notifyCheckpointAborted to the real OperatorCoordinator [FLINK-32680] - Job vertex names get messed up once there is a source vertex chained with a MultipleInput vertex in job graph [FLINK-32760] - Version Conflict in flink-sql-connector-hive for shaded.parquet prefix packages [FLINK-32888] - File upload runs into EndOfDataDecoderException [FLINK-32909] - The jobmanager.sh pass arguments failed [FLINK-33010] - NPE when using GREATEST() in Flink SQL [FLINK-33149] - Bump snappy-java to 1.1.10.4 [FLINK-33291] - The release profile of Flink does include enforcing the Java version only in a &quot;soft&quot; way Improvement [FLINK-29542] - Unload.md wrongly writes UNLOAD operation as LOAD operation [FLINK-32314] - Ignore class-loading errors after RPC system shutdown [FLINK-32371] - Bump snappy-java to 1.1.10.1 `}),e.add({id:64,href:"/2023/11/29/apache-flink-1.17.2-release-announcement/",title:"Apache Flink 1.17.2 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the second bug fix release of the Flink 1.17 series.
+Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.16.3 Release Notes # Release Notes - Flink - Version 1.16.3 Bug [FLINK-32316] - Duplicated announceCombinedWatermark task maybe scheduled if jobmanager failover [FLINK-32362] - SourceAlignment announceCombinedWatermark period task maybe lost [FLINK-32411] - SourceCoordinator thread leaks when job recovers from checkpoint [FLINK-32414] - Watermark alignment will cause flink jobs to hang forever when any source subtask has no SourceSplit [FLINK-32496] - Sources with idleness and alignment always wait for alignment when part of multiple sources is idle [FLINK-27415] - Read empty csv file throws exception in FileSystem table connector [FLINK-28185] - &quot;Invalid negative offset&quot; when using OffsetsInitializer.timestamp(.) [FLINK-29913] - Shared state would be discarded by mistake when maxConcurrentCheckpoint&gt;1 [FLINK-30559] - May get wrong result for \`if\` expression if it&#39;s string data type [FLINK-30596] - Multiple POST /jars/:jarid/run requests with the same jobId, runs duplicate jobs [FLINK-30751] - Remove references to disableDataSync in RocksDB documentation [FLINK-30966] - Flink SQL IF FUNCTION logic error [FLINK-31139] - not upload empty state changelog file [FLINK-31967] - SQL with LAG function NullPointerException [FLINK-32023] - execution.buffer-timeout cannot be set to -1 ms [FLINK-32136] - Pyflink gateway server launch fails when purelib != platlib [FLINK-32172] - KafkaExample can not run with args [FLINK-32199] - MetricStore does not remove metrics of nonexistent parallelism in TaskMetricStore when scale down job parallelism [FLINK-32217] - Retain metric store can cause NPE [FLINK-32254] - FineGrainedSlotManager may not allocate enough taskmanagers if maxSlotNum is configured [FLINK-32296] - Flink SQL handle array of row incorrectly [FLINK-32548] - Make watermark alignment ready for production use [FLINK-32583] - RestClient can deadlock if 
request made after Netty event executor terminated [FLINK-32592] - (Stream)ExEnv#initializeContextEnvironment isn&#39;t thread-safe [FLINK-32655] - RecreateOnResetOperatorCoordinator did not forward notifyCheckpointAborted to the real OperatorCoordinator [FLINK-32680] - Job vertex names get messed up once there is a source vertex chained with a MultipleInput vertex in job graph [FLINK-32760] - Version Conflict in flink-sql-connector-hive for shaded.parquet prefix packages [FLINK-32888] - File upload runs into EndOfDataDecoderException [FLINK-32909] - The jobmanager.sh pass arguments failed [FLINK-33010] - NPE when using GREATEST() in Flink SQL [FLINK-33149] - Bump snappy-java to 1.1.10.4 [FLINK-33291] - The release profile of Flink does include enforcing the Java version only in a &quot;soft&quot; way Improvement [FLINK-29542] - Unload.md wrongly writes UNLOAD operation as LOAD operation [FLINK-32314] - Ignore class-loading errors after RPC system shutdown [FLINK-32371] - Bump snappy-java to 1.1.10.1 `}),e.add({id:65,href:"/2023/11/29/apache-flink-1.17.2-release-announcement/",title:"Apache Flink 1.17.2 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the second bug fix release of the Flink 1.17 series.
 This release includes 82 bug fixes, vulnerability fixes, and minor improvements for Flink 1.17. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
 We highly recommend all users upgrade to Flink 1.17.2.
 Release Artifacts # Maven Dependencies # &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.17.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java&lt;/artifactId&gt; &lt;version&gt;1.17.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients&lt;/artifactId&gt; &lt;version&gt;1.17.2&lt;/version&gt; &lt;/dependency&gt; Binaries # You can find the binaries on the updated Downloads page.
-Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.17.2 Release Notes # Release Notes - Flink - Version 1.17.2 Bug [FLINK-27415] - Read empty csv file throws exception in FileSystem table connector [FLINK-28513] - Flink Table API CSV streaming sink throws SerializedThrowable exception [FLINK-29913] - Shared state would be discarded by mistake when maxConcurrentCheckpoint&gt;1 [FLINK-30559] - May get wrong result for \`if\` expression if it&#39;s string data type [FLINK-30596] - Multiple POST /jars/:jarid/run requests with the same jobId, runs duplicate jobs [FLINK-30751] - Remove references to disableDataSync in RocksDB documentation [FLINK-30966] - Flink SQL IF FUNCTION logic error [FLINK-31139] - not upload empty state changelog file [FLINK-31519] - The watermark alignment docs is outdated after FLIP-217 finished [FLINK-31812] - SavePoint from /jars/:jarid:/run api on body is not anymore set to null if empty [FLINK-31967] - SQL with LAG function NullPointerException [FLINK-31974] - JobManager crashes after KubernetesClientException exception with FatalExitExceptionHandler [FLINK-32023] - execution.buffer-timeout cannot be set to -1 ms [FLINK-32034] - Python&#39;s DistUtils is deprecated as of 3.10 [FLINK-32056] - Update the used Pulsar connector in flink-python to 4.0.0 [FLINK-32110] - TM native memory leak when using time window in Pyflink ThreadMode [FLINK-32136] - Pyflink gateway server launch fails when purelib != platlib [FLINK-32141] - SharedStateRegistry print too much info log [FLINK-32172] - KafkaExample can not run with args [FLINK-32199] - MetricStore does not remove metrics of nonexistent parallelism in TaskMetricStore when scale down job parallelism [FLINK-32217] - Retain metric store can cause NPE [FLINK-32219] - SQL client hangs when executing EXECUTE PLAN [FLINK-32226] - RestClusterClient leaks jobgraph file if submission fails [FLINK-32249] - A Java string should be used instead of a Calcite 
NlsString to construct the column comment of CatalogTable [FLINK-32254] - FineGrainedSlotManager may not allocate enough taskmangers if maxSlotNum is configured [FLINK-32296] - Flink SQL handle array of row incorrectly [FLINK-32316] - Duplicated announceCombinedWatermark task maybe scheduled if jobmanager failover [FLINK-32362] - SourceAlignment announceCombinedWatermark period task maybe lost [FLINK-32411] - SourceCoordinator thread leaks when job recovers from checkpoint [FLINK-32414] - Watermark alignment will cause flink jobs to hang forever when any source subtask has no SourceSplit [FLINK-32447] - table hints lost when they inside a view referenced by an external query [FLINK-32456] - JSON_OBJECTAGG &amp; JSON_ARRAYAGG cannot be used with other aggregate functions [FLINK-32465] - KerberosLoginProvider.isLoginPossible does accidental login with keytab [FLINK-32496] - Sources with idleness and alignment always wait for alignment when part of multiple sources is idle [FLINK-32548] - Make watermark alignment ready for production use [FLINK-32578] - Cascaded group by window time columns on a proctime window aggregate may result hang for ever [FLINK-32583] - RestClient can deadlock if request made after Netty event executor terminated [FLINK-32592] - (Stream)ExEnv#initializeContextEnvironment isn&#39;t thread-safe [FLINK-32628] - build_wheels_on_macos fails on AZP [FLINK-32655] - RecreateOnResetOperatorCoordinator did not forward notifyCheckpointAborted to the real OperatorCoordinator [FLINK-32680] - Job vertex names get messed up once there is a source vertex chained with a MultipleInput vertex in job graph [FLINK-32760] - Version Conflict in flink-sql-connector-hive for shaded.parquet prefix packages [FLINK-32876] - ExecutionTimeBasedSlowTaskDetector treats unscheduled tasks as slow tasks and causes speculative execution to fail. 
[FLINK-32888] - File upload runs into EndOfDataDecoderException [FLINK-32909] - The jobmanager.sh pass arguments failed [FLINK-32962] - Failure to install python dependencies from requirements file [FLINK-32974] - RestClusterClient always leaks flink-rest-client-jobgraphs* directories [FLINK-33010] - NPE when using GREATEST() in Flink SQL [FLINK-33149] - Bump snappy-java to 1.1.10.4 [FLINK-33171] - Consistent implicit type coercion support for equal and non-equal comparisons for codegen [FLINK-33291] - The release profile of Flink does include enforcing the Java version only in a &quot;soft&quot; way [FLINK-33352] - OpenAPI spec is lacking mappings for discriminator properties [FLINK-33442] - UnsupportedOperationException thrown from RocksDBIncrementalRestoreOperation [FLINK-33474] - ShowPlan throws undefined exception In Flink Web Submit Page Improvement [FLINK-31774] - Add document for delete and update statement [FLINK-32186] - Support subtask stack auto-search when redirecting from subtask backpressure tab [FLINK-32304] - Reduce rpc-akka jar size [FLINK-32314] - Ignore class-loading errors after RPC system shutdown [FLINK-32371] - Bump snappy-java to 1.1.10.1 [FLINK-32457] - update current documentation of JSON_OBJECTAGG/JSON_ARRAYAGG to clarify the limitation [FLINK-32458] - support mixed use of JSON_OBJECTAGG &amp; JSON_ARRAYAGG with other aggregate functions [FLINK-32547] - Add missing doc for Timestamp support in ProtoBuf format [FLINK-32758] - PyFlink bounds are overly restrictive and outdated [FLINK-33316] - Avoid unnecessary heavy getStreamOperatorFactory [FLINK-33487] - Add the new Snowflake connector to supported list `}),e.add({id:65,href:"/2023/11/22/apache-flink-kubernetes-operator-1.7.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.7.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink Kubernetes Operator 1.7.0! 
The release introduces a large number of improvements to the autoscaler, including a complete decoupling from Kubernetes to support more Flink environments in the future. It&rsquo;s important to call out that the release explicitly drops support for Flink 1.13 and 1.14 as agreed by the community.
+Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.17.2 Release Notes # Release Notes - Flink - Version 1.17.2 Bug [FLINK-27415] - Read empty csv file throws exception in FileSystem table connector [FLINK-28513] - Flink Table API CSV streaming sink throws SerializedThrowable exception [FLINK-29913] - Shared state would be discarded by mistake when maxConcurrentCheckpoint&gt;1 [FLINK-30559] - May get wrong result for \`if\` expression if it&#39;s string data type [FLINK-30596] - Multiple POST /jars/:jarid/run requests with the same jobId, runs duplicate jobs [FLINK-30751] - Remove references to disableDataSync in RocksDB documentation [FLINK-30966] - Flink SQL IF FUNCTION logic error [FLINK-31139] - not upload empty state changelog file [FLINK-31519] - The watermark alignment docs is outdated after FLIP-217 finished [FLINK-31812] - SavePoint from /jars/:jarid:/run api on body is not anymore set to null if empty [FLINK-31967] - SQL with LAG function NullPointerException [FLINK-31974] - JobManager crashes after KubernetesClientException exception with FatalExitExceptionHandler [FLINK-32023] - execution.buffer-timeout cannot be set to -1 ms [FLINK-32034] - Python&#39;s DistUtils is deprecated as of 3.10 [FLINK-32056] - Update the used Pulsar connector in flink-python to 4.0.0 [FLINK-32110] - TM native memory leak when using time window in Pyflink ThreadMode [FLINK-32136] - Pyflink gateway server launch fails when purelib != platlib [FLINK-32141] - SharedStateRegistry print too much info log [FLINK-32172] - KafkaExample can not run with args [FLINK-32199] - MetricStore does not remove metrics of nonexistent parallelism in TaskMetricStore when scale down job parallelism [FLINK-32217] - Retain metric store can cause NPE [FLINK-32219] - SQL client hangs when executing EXECUTE PLAN [FLINK-32226] - RestClusterClient leaks jobgraph file if submission fails [FLINK-32249] - A Java string should be used instead of a Calcite 
NlsString to construct the column comment of CatalogTable [FLINK-32254] - FineGrainedSlotManager may not allocate enough taskmangers if maxSlotNum is configured [FLINK-32296] - Flink SQL handle array of row incorrectly [FLINK-32316] - Duplicated announceCombinedWatermark task maybe scheduled if jobmanager failover [FLINK-32362] - SourceAlignment announceCombinedWatermark period task maybe lost [FLINK-32411] - SourceCoordinator thread leaks when job recovers from checkpoint [FLINK-32414] - Watermark alignment will cause flink jobs to hang forever when any source subtask has no SourceSplit [FLINK-32447] - table hints lost when they inside a view referenced by an external query [FLINK-32456] - JSON_OBJECTAGG &amp; JSON_ARRAYAGG cannot be used with other aggregate functions [FLINK-32465] - KerberosLoginProvider.isLoginPossible does accidental login with keytab [FLINK-32496] - Sources with idleness and alignment always wait for alignment when part of multiple sources is idle [FLINK-32548] - Make watermark alignment ready for production use [FLINK-32578] - Cascaded group by window time columns on a proctime window aggregate may result hang for ever [FLINK-32583] - RestClient can deadlock if request made after Netty event executor terminated [FLINK-32592] - (Stream)ExEnv#initializeContextEnvironment isn&#39;t thread-safe [FLINK-32628] - build_wheels_on_macos fails on AZP [FLINK-32655] - RecreateOnResetOperatorCoordinator did not forward notifyCheckpointAborted to the real OperatorCoordinator [FLINK-32680] - Job vertex names get messed up once there is a source vertex chained with a MultipleInput vertex in job graph [FLINK-32760] - Version Conflict in flink-sql-connector-hive for shaded.parquet prefix packages [FLINK-32876] - ExecutionTimeBasedSlowTaskDetector treats unscheduled tasks as slow tasks and causes speculative execution to fail. 
[FLINK-32888] - File upload runs into EndOfDataDecoderException [FLINK-32909] - The jobmanager.sh pass arguments failed [FLINK-32962] - Failure to install python dependencies from requirements file [FLINK-32974] - RestClusterClient always leaks flink-rest-client-jobgraphs* directories [FLINK-33010] - NPE when using GREATEST() in Flink SQL [FLINK-33149] - Bump snappy-java to 1.1.10.4 [FLINK-33171] - Consistent implicit type coercion support for equal and non-equal comparisons for codegen [FLINK-33291] - The release profile of Flink does include enforcing the Java version only in a &quot;soft&quot; way [FLINK-33352] - OpenAPI spec is lacking mappings for discriminator properties [FLINK-33442] - UnsupportedOperationException thrown from RocksDBIncrementalRestoreOperation [FLINK-33474] - ShowPlan throws undefined exception In Flink Web Submit Page Improvement [FLINK-31774] - Add document for delete and update statement [FLINK-32186] - Support subtask stack auto-search when redirecting from subtask backpressure tab [FLINK-32304] - Reduce rpc-akka jar size [FLINK-32314] - Ignore class-loading errors after RPC system shutdown [FLINK-32371] - Bump snappy-java to 1.1.10.1 [FLINK-32457] - update current documentation of JSON_OBJECTAGG/JSON_ARRAYAGG to clarify the limitation [FLINK-32458] - support mixed use of JSON_OBJECTAGG &amp; JSON_ARRAYAGG with other aggregate functions [FLINK-32547] - Add missing doc for Timestamp support in ProtoBuf format [FLINK-32758] - PyFlink bounds are overly restrictive and outdated [FLINK-33316] - Avoid unnecessary heavy getStreamOperatorFactory [FLINK-33487] - Add the new Snowflake connector to supported list `}),e.add({id:66,href:"/2023/11/22/apache-flink-kubernetes-operator-1.7.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.7.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink Kubernetes Operator 1.7.0! 
The release introduces a large number of improvements to the autoscaler, including a complete decoupling from Kubernetes to support more Flink environments in the future. It&rsquo;s important to call out that the release explicitly drops support for Flink 1.13 and 1.14 as agreed by the community.
 We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA! We hope you like the new release and we’d be eager to learn about your experience with it.
 Flink Version Support Policy Change # Previously the operator only added more and more supported Flink versions without a policy to remove support for these in the future. This resulted in a lot of legacy codepaths already in the core logic.
 To keep technical debt at reasonable levels, the community decided to adopt a new Flink version support policy for the operator.
@@ -1038,14 +1048,14 @@
 $ helm repo add flink-kubernetes-operator-1.7.0 https://archive.apache.org/dist/flink/flink-kubernetes-operator-1.7.0/ $ helm install flink-kubernetes-operator flink-kubernetes-operator-1.7.0/flink-kubernetes-operator --set webhook.create=false You can also find official Kubernetes Operator Docker images of the new version on Dockerhub.
 For more details, check the updated documentation and the release notes. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA.
 List of Contributors # Alexander Fedulov, Clara Xiong, Daren Wong, Dongwoo Kim, Gabor Somogyi, Gyula Fora, Manan Mangal, Maximilian Michels, Nicolas Fraison, Peter Huang, Praneeth Ramesh, Rui Fan, Sergey Nuyanzin, SteNicholas, Zhanghao, Zhenqiu Huang, mehdid93
-`}),e.add({id:66,href:"/2023/10/27/apache-flink-kubernetes-operator-1.6.1-release-announcement/",title:"Apache Flink Kubernetes Operator 1.6.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink Kubernetes Operator 1.6 series.
+`}),e.add({id:67,href:"/2023/10/27/apache-flink-kubernetes-operator-1.6.1-release-announcement/",title:"Apache Flink Kubernetes Operator 1.6.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink Kubernetes Operator 1.6 series.
 The release contains fixes for several critical issues, and some doc improvements for the autoscaler.
 We highly recommend all users to upgrade to Flink Kubernetes Operator 1.6.1.
 Release Notes # Bug # [FLINK-32890] Correct HA patch check for zookeeper metadata store [FLINK-33011] Never accidentally delete HA metadata for last state deployments Documentation improvement # [FLINK-32868][docs] Document the need to backport FLINK-30213 for using autoscaler with older version Flinks [docs][autoscaler] Autoscaler docs and default config improvement Release Resources # The source artifacts and helm chart are available on the Downloads page of the Flink website. You can easily try out the new features shipped in the official 1.6.1 release by adding the Helm chart to your own local registry:
 $ helm repo add flink-kubernetes-operator-1.6.1 https://archive.apache.org/dist/flink/flink-kubernetes-operator-1.6.1/ $ helm install flink-kubernetes-operator flink-kubernetes-operator-1.6.1/flink-kubernetes-operator --set webhook.create=false You can also find official Kubernetes Operator Docker images of the new version on Dockerhub.
 For more details, check the updated documentation and the release notes. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA.
 List of Contributors # Gyula Fora, Nicolas Fraison, Zhanghao
-`}),e.add({id:67,href:"/2023/10/24/announcing-the-release-of-apache-flink-1.18/",title:"Announcing the Release of Apache Flink 1.18",section:"Flink Blog",content:`The Apache Flink PMC is pleased to announce the release of Apache Flink 1.18.0. As usual, we are looking at a packed release with a wide variety of improvements and new features. Overall, 174 people contributed to this release completing 18 FLIPS and 700+ issues. Thank you!
+`}),e.add({id:68,href:"/2023/10/24/announcing-the-release-of-apache-flink-1.18/",title:"Announcing the Release of Apache Flink 1.18",section:"Flink Blog",content:`The Apache Flink PMC is pleased to announce the release of Apache Flink 1.18.0. As usual, we are looking at a packed release with a wide variety of improvements and new features. Overall, 174 people contributed to this release completing 18 FLIPS and 700+ issues. Thank you!
 Let&rsquo;s dive into the highlights.
 Towards a Streaming Lakehouse # Flink SQL Improvements # Introduce Flink JDBC Driver For SQL Gateway # Flink 1.18 comes with a JDBC Driver for the Flink SQL Gateway. So, you can now use any SQL Client that supports JDBC to interact with your tables via Flink SQL. Here is an example using SQLLine.
 sqlline&gt; !connect jdbc:flink://localhost:8083 sqlline version 1.12.0 sqlline&gt; !connect jdbc:flink://localhost:8083 Enter username for jdbc:flink://localhost:8083: Enter password for jdbc:flink://localhost:8083: 0: jdbc:flink://localhost:8083&gt; CREATE TABLE T( . . . . . . . . . . . . . . .)&gt; a INT, . . . . . . . . . . . . . . .)&gt; b VARCHAR(10) . . . . . . . . . . . . . . .)&gt; ) WITH ( . . . . . . . . . . . . . . .)&gt; &#39;connector&#39; = &#39;filesystem&#39;, . . . . . . . . . . . . . . .)&gt; &#39;path&#39; = &#39;file:///tmp/T.csv&#39;, . . . . . . . . . . . . . . .)&gt; &#39;format&#39; = &#39;csv&#39; . . . . . . . . . . . . . . .)&gt; ); No rows affected (0.122 seconds) 0: jdbc:flink://localhost:8083&gt; INSERT INTO T VALUES (1, &#39;Hi&#39;), (2, &#39;Hello&#39;); +----------------------------------+ | job id | +----------------------------------+ | fbade1ab4450fc57ebd5269fdf60dcfd | +----------------------------------+ 1 row selected (1.282 seconds) 0: jdbc:flink://localhost:8083&gt; SELECT * FROM T; +---+-------+ | a | b | +---+-------+ | 1 | Hi | | 2 | Hello | +---+-------+ 2 rows selected (1.955 seconds) 0: jdbc:flink://localhost:8083&gt; More Information
@@ -1102,7 +1112,7 @@
 SourceFunction is now officially deprecated and will be dropped in Flink 2.0. If you are still using a connector that is built on top of SourceFunction please migrate it to Source. SinkFunction is not officially deprecated, but it is also approaching end-of-life and will be superseded by SinkV2. Queryable State is now officially deprecated and will be dropped in Flink 2.0. The DataSet API is now officially deprecated. Users are recommended to migrate to the DataStream API with execution mode BATCH. Upgrade Notes # The Flink community tries to ensure that upgrades are as seamless as possible. However, certain changes may require users to make adjustments to certain parts of the program when upgrading to version 1.18. Please refer to the release notes for a comprehensive list of adjustments to make and issues to check during the upgrading process.
 List of Contributors # The Apache Flink community would like to express gratitude to all the contributors who made this release possible:
 Aitozi, Akinfolami Akin-Alamu, Alain Brown, Aleksandr Pilipenko, Alexander Fedulov, Anton Kalashnikov, Archit Goyal, Bangui Dunn, Benchao Li, BoYiZhang, Chesnay Schepler, Chris Nauroth, Colten Pilgreen, Danny Cranmer, David Christle, David Moravek, Dawid Wysakowicz, Deepyaman Datta, Dian Fu, Dian Qi, Dong Lin, Eric Xiao, Etienne Chauchot, Feng Jin, Ferenc Csaky, Fruzsina Nagy, Gabor Somogyi, Gunnar Morling, Gyula Fora, HaiYang Chen, Hang Ruan, Hangxiang Yu, Hanyu Zheng, Hong Liang Teoh, Hongshun Wang, Huston, Jacky Lau, James Hughes, Jane Chan, Jark Wu, Jayadeep Jayaraman, Jia Liu, JiangXin, Joao Boto, Junrui Lee, Juntao Hu, K.I. (Dennis) Jung, Kaiqi Dong, L, Leomax_Sun, Leonard Xu, Licho, Lijie Wang, Liu Jiangang, Lyn Zhang, Maomao Min, Martijn Visser, Marton Balassi, Mason Chen, Matthew de Detrich, Matthias Pohl, Min, Mingliang Liu, Mohsen Rezaei, Mrart, Mulavar, Nicholas Jiang, Nicolas Fraison, Noah, Panagiotis Garefalakis, Patrick Lucas, Paul Lin, Peter Vary, Piotr Nowojski, Qingsheng Ren, Ran Tao, Rich Bowen, Robert Metzger, Roc Marshal, Roman Khachatryan, Ron, Rui Fan, Ryan Skraba, Samrat002, Sergey Nuyanzin, Sergio Morales, Shammon FY, ShammonFY, Shengkai, Shuiqiang Chen, Stefan Richter, Tartarus0zm, Timo Walther, Tzu-Li (Gordon) Tai, Venkata krishnan Sowrirajan, Wang FeiFan, Weihua Hu, Weijie Guo, Wencong Liu, Xiaogang Zhou, Xintong Song, XuShuai, Yanfei Lei, Yu Chen, Yubin Li, Yun Gao, Yun Tang, Yuxin Tan, Zakelly, Zhanghao Chen, ZhengYiWeng, Zhu Zhu, archzi, baiwuchang, cailiuyang, chenyuzhi, darenwkt, dongwoo kim, eason.qin, felixzh, fengli, frankeshi, fredia, godfrey he, haishui, hehuiyuan, huangxingbo, jiangxin, jiaoqingbo, jinfeng, jingge, kevin.cyj, kristoffSC, leixin, leiyanfei, liming.1018, lincoln lee, lincoln.lil, liujiangang, liuyongvs, luoyuxia, maigeiye, mas-chen, novakov-alexey, oleksandr.nitavskyi, pegasas, sammieliu, shammon, shammon FY, shuiqiangchen, slfan1989, sunxia, tison, tsreaper, wangfeifan, wangkang, whjshj, wuqqq, xiangyu0xf, 
xincheng.ljr, xmzhou, xuyu, xzw, yuanweining, yuchengxin, yunfengzhou-hub, yunhong, yuxia Luo, yuxiqian, zekai-li, zhangmang, zhengyunhong.zyh, zzzzzzzs, 沈嘉琦
-`}),e.add({id:68,href:"/2023/09/19/stateful-functions-3.3.0-release-announcement/",title:"Stateful Functions 3.3.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Stateful Functions 3.3.0!
+`}),e.add({id:69,href:"/2023/09/19/stateful-functions-3.3.0-release-announcement/",title:"Stateful Functions 3.3.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Stateful Functions 3.3.0!
 Stateful Functions is a cross-platform stack for building Stateful Serverless applications, making it radically simpler to develop scalable, consistent, and elastic distributed applications. This new release upgrades the Flink runtime to 1.16.2.
 The binary distribution and source artifacts are now available on the updated Downloads page of the Flink website, and the most recent Java SDK, Python SDK,, GoLang SDK and JavaScript SDK distributions are available on Maven, PyPI, Github, and npm respectively. You can also find official StateFun Docker images of the new version on Dockerhub.
 For more details, check the complete release notes and the updated documentation. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA!
@@ -1112,7 +1122,7 @@
 Release Notes # Please review the release notes for a detailed list of changes and new features if you plan to upgrade your setup to Stateful Functions 3.3.0.
 List of Contributors # Till Rohrmann, Mingmin Xu, Igal Shilman, Martijn Visser, Chesnay Schepler, SiddiqueAhmad, Galen Warren, Seth Wiesman, FilKarnicki, Tzu-Li (Gordon) Tai
 If you’d like to get involved, we’re always looking for new contributors.
-`}),e.add({id:69,href:"/2023/08/15/apache-flink-kubernetes-operator-1.6.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.6.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink Kubernetes Operator 1.6.0! The release features a large number of improvements all across the operator.
+`}),e.add({id:70,href:"/2023/08/15/apache-flink-kubernetes-operator-1.6.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.6.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink Kubernetes Operator 1.6.0! The release features a large number of improvements all across the operator.
 We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA! We hope you like the new release and we’d be eager to learn about your experience with it.
 Highlights # Improved and simplified rollback mechanism # Previously the rollback mechanism had some serious limitations always requiring the presence of HA metadata. This prevented rollbacks in many cases for instance when the new application terminally failed after the upgrade.
 1.6.0 introduces several core improvements to the rollback mechanism to leverage the robust upgrade flow and cover a much wider range of failure scenarios.
@@ -1130,7 +1140,7 @@
 $ helm repo add flink-kubernetes-operator-1.6.0 https://archive.apache.org/dist/flink/flink-kubernetes-operator-1.6.0/ $ helm install flink-kubernetes-operator flink-kubernetes-operator-1.6.0/flink-kubernetes-operator --set webhook.create=false You can also find official Kubernetes Operator Docker images of the new version on Dockerhub.
 For more details, check the updated documentation and the release notes. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA.
 List of Contributors # Alexander Fedulov, ConradJam, Fangbin Sun, Gyula Fora, James Busche, Mate Czagany, Matyas Orhidi, Maximilian Michels, Nicolas Fraison, Oleksandr Nitavskyi, Tamir Sagi, Thomas, Xin Hao, Xingcan Cui, Daren Wong, Fabio Wanner, kenankule, llussy, yangjf2019,
-`}),e.add({id:70,href:"/2023/08/04/announcing-three-new-apache-flink-connectors-the-new-connector-versioning-strategy-and-externalization/",title:"Announcing three new Apache Flink connectors, the new connector versioning strategy and externalization",section:"Flink Blog",content:` New connectors # We&rsquo;re excited to announce that Apache Flink now supports three new connectors: Amazon DynamoDB, MongoDB and OpenSearch! The connectors are available for both the DataStream and Table/SQL APIs.
+`}),e.add({id:71,href:"/2023/08/04/announcing-three-new-apache-flink-connectors-the-new-connector-versioning-strategy-and-externalization/",title:"Announcing three new Apache Flink connectors, the new connector versioning strategy and externalization",section:"Flink Blog",content:` New connectors # We&rsquo;re excited to announce that Apache Flink now supports three new connectors: Amazon DynamoDB, MongoDB and OpenSearch! The connectors are available for both the DataStream and Table/SQL APIs.
 Amazon DynamoDB - This connector includes a sink that provides at-least-once delivery guarantees. MongoDB connector - This connector includes a source and sink that provide at-least-once guarantees. OpenSearch sink - This connector includes a sink that provides at-least-once guarantees. Connector Date Released Supported Flink Versions Amazon DynamoDB sink 2022-12-02 1.15+ MongoDB connector 2023-03-31 1.16+ OpenSearch sink 2022-12-21 1.16+ List of Contributors # The Apache Flink community would like to express gratitude to all the new connector contributors:
 Andriy Redko, Chesnay Schepler, Danny Cranmer, darenwkt, Hong Liang Teoh, Jiabao Sun, Leonid Ilyevsky, Martijn Visser, nir.tsruya, Sergey Nuyanzin, Weijie Guo, Yuri Gusev, Yuxin Tan
 Externalized connectors # The community has externalized connectors from Flink’s main repository. This was driven to realise the following benefits:
@@ -1139,19 +1149,19 @@
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-connector-dynamodb&lt;/artifactId&gt; &lt;version&gt;4.1.0-1.17&lt;/version&gt; &lt;/dependency&gt; You can find the maven dependency for a connector in the Flink connectors documentation for a specific Flink version. Use the Flink Downloads page to verify which version your connector is compatible with.
 Contributing # Similarly, when creating JIRAs to report issues or to contribute to externalized connectors, the Affects Version/s and Fix Version/s fields should now use the connector version instead of a Flink version. The format should be &lt;connector-name&gt;-&lt;major&gt;.&lt;minor&gt;.&lt;patch&gt;. For example, use opensearch-1.1.0 for the OpenSearch connector. All other fields in the JIRA like Component/s remain the same.
 For more information on how to contribute to externalized connectors, see the Externalized Connector development wiki.
-`}),e.add({id:71,href:"/2023/07/03/sigmod-systems-award-for-apache-flink/",title:"SIGMOD Systems Award for Apache Flink",section:"Flink Blog",content:`Apache Flink received the 2023 SIGMOD Systems Award, which is awarded to an individual or set of individuals to recognize the development of a software or hardware system whose technical contributions have had significant impact on the theory or practice of large-scale data management systems:
+`}),e.add({id:72,href:"/2023/07/03/sigmod-systems-award-for-apache-flink/",title:"SIGMOD Systems Award for Apache Flink",section:"Flink Blog",content:`Apache Flink received the 2023 SIGMOD Systems Award, which is awarded to an individual or set of individuals to recognize the development of a software or hardware system whose technical contributions have had significant impact on the theory or practice of large-scale data management systems:
 The 2023 SIGMOD Systems Award goes to Apache Flink:
 “Apache Flink greatly expanded the use of stream data-processing.”
 Winning of SIGMOD Systems Award indicates the high recognition of Flink&rsquo;s technological advancement and industry influence from academia. This is a significant achievement by the whole community in Apache Flink, including the over 1,400 contributors and many others who contributed in ways beyond code.
-`}),e.add({id:72,href:"/2023/05/25/apache-flink-1.16.2-release-announcement/",title:"Apache Flink 1.16.2 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the second bug fix release of the Flink 1.16 series.
+`}),e.add({id:73,href:"/2023/05/25/apache-flink-1.16.2-release-announcement/",title:"Apache Flink 1.16.2 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the second bug fix release of the Flink 1.16 series.
 This release includes 104 bug fixes, vulnerability fixes, and minor improvements for Flink 1.16. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
 We highly recommend all users upgrade to Flink 1.16.2.
 Release Artifacts # Maven Dependencies # &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.16.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java&lt;/artifactId&gt; &lt;version&gt;1.16.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients&lt;/artifactId&gt; &lt;version&gt;1.16.2&lt;/version&gt; &lt;/dependency&gt; Binaries # You can find the binaries on the updated Downloads page.
-Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.16.2 Release Notes # Bug [FLINK-27246] - Code of method &quot;processElement(Lorg/apache/flink/streaming/runtime/streamrecord/StreamRecord;)V&quot; of class &quot;HashAggregateWithKeys$9211&quot; grows beyond 64 KB [FLINK-27800] - addInEdge check state error [FLINK-27848] - ZooKeeperLeaderElectionDriver keeps writing leader information, using up zxid [FLINK-28786] - Cannot run PyFlink 1.16 on MacOS with M1 chip [FLINK-29852] - Adaptive Scheduler duplicates operators for each parallel instance in the Web UI [FLINK-30461] - Some rocksdb sst files will remain forever [FLINK-30462] - DefaultMultipleComponentLeaderElectionService saves wrong leader session ID [FLINK-30477] - Not properly blocking retries when timeout occurs in AsyncWaitOperator [FLINK-30561] - ChangelogStreamHandleReaderWithCache cause FileNotFoundException [FLINK-30567] - Wrong insert overwrite behavior when the table contains uppercase character with Hive dialect [FLINK-30679] - Can not load the data of hive dim table when project-push-down is introduced [FLINK-30792] - clean up not uploaded state changes after materialization complete [FLINK-30803] - PyFlink mishandles script dependencies [FLINK-30864] - Optional pattern at the start of a group pattern not working [FLINK-30876] - Fix ResetTransformationProcessor don&#39;t reset the transformation of ExecNode in BatchExecMultiInput.rootNode [FLINK-30881] - Crictl/Minikube version mismatch causes errors in k8s setup [FLINK-30885] - Optional group pattern starts with non-optional looping pattern get wrong result on followed-by [FLINK-30917] - The user configured max parallelism does not take effect when using adaptive batch scheduler [FLINK-30989] - Configuration table.exec.spill-compression.block-size not take effect in batch job [FLINK-31017] - Early-started partial match timeout not yield completed matches [FLINK-31041] - Build up of pending global 
failures causes JM instability [FLINK-31042] - AfterMatchSkipStrategy not working on notFollowedBy ended pattern [FLINK-31043] - KeyError exception is thrown in CachedMapState [FLINK-31077] - Trigger checkpoint failed but it were shown as COMPLETED by rest API [FLINK-31083] - Python ProcessFunction with OutputTag cannot be reused [FLINK-31099] - Chained WindowOperator throws NPE in PyFlink ThreadMode [FLINK-31131] - The INITIALIZING of ExecutionState is missed in the state_machine doc [FLINK-31162] - Avoid setting private tokens to AM container context when kerberos delegation token fetch is disabled [FLINK-31182] - CompiledPlan cannot deserialize BridgingSqlFunction with MissingTypeStrategy [FLINK-31183] - Flink Kinesis EFO Consumer can fail to stop gracefully [FLINK-31185] - Python BroadcastProcessFunction not support side output [FLINK-31272] - Duplicate operators appear in the StreamGraph for Python DataStream API jobs [FLINK-31273] - Left join with IS_NULL filter be wrongly pushed down and get wrong join results [FLINK-31283] - Correct the description of building from source with scala version [FLINK-31286] - Python processes are still alive when shutting down a session cluster directly without stopping the jobs [FLINK-31293] - Request memory segment from LocalBufferPool may hanging forever. 
[FLINK-31305] - KafkaWriter doesn&#39;t wait for errors for in-flight records before completing flush [FLINK-31319] - Kafka new source partitionDiscoveryIntervalMs=0 cause bounded source can not quit [FLINK-31346] - Batch shuffle IO scheduler does not throw TimeoutException if numRequestedBuffers is greater than 0 [FLINK-31386] - Fix the potential deadlock issue of blocking shuffle [FLINK-31414] - exceptions in the alignment timer are ignored [FLINK-31437] - Wrong key &#39;lookup.cache.caching-missing-key&#39; in connector documentation [FLINK-31478] - TypeError: a bytes-like object is required, not &#39;JavaList&#39; is thrown when ds.execute_and_collect() is called on a KeyedStream [FLINK-31503] - &quot;org.apache.beam.sdk.options.PipelineOptionsRegistrar: Provider org.apache.beam.sdk.options.DefaultPipelineOptionsRegistrar not a subtype&quot; is thrown when executing Python UDFs in SQL Client [FLINK-31588] - The unaligned checkpoint type is wrong at subtask level [FLINK-31632] - watermark aligned idle source can&#39;t resume [FLINK-31652] - Flink should handle the delete event if the pod was deleted while pending [FLINK-31653] - Using\`if\` statement for a string subtype of the row type may meet npe in code generated by codegen [FLINK-31657] - ConfigurationInfo generates incorrect openapi schema [FLINK-31670] - ElasticSearch connector&#39;s document was not incorrect linked to external repo [FLINK-31683] - Align the outdated Chinese filesystem connector docs [FLINK-31690] - The current key is not set for KeyedCoProcessOperator [FLINK-31707] - Constant string cannot be used as input arguments of Pandas UDAF [FLINK-31743] - Avoid relocating the RocksDB&#39;s log failure when filename exceeds 255 characters [FLINK-31763] - Convert requested buffers to overdraft buffers when pool size is decreased [FLINK-31959] - Correct the unaligned checkpoint type at checkpoint level [FLINK-31963] - java.lang.ArrayIndexOutOfBoundsException when scaling down with unaligned 
checkpoints [FLINK-32010] - KubernetesLeaderRetrievalDriver always waits for lease update to resolve leadership [FLINK-32027] - Batch jobs could hang at shuffle phase when max parallelism is really large [FLINK-32029] - FutureUtils.handleUncaughtException swallows exceptions that are caused by the exception handler code Improvement [FLINK-25874] - PyFlink package dependencies conflict [FLINK-29729] - Fix credential info configured in flink-conf.yaml is lost during creating ParquetReader [FLINK-30962] - Improve error messaging when launching py4j gateway server [FLINK-31031] - Disable the output buffer of Python process to make it more convenient for interactive users [FLINK-31227] - Remove &#39;scala version&#39; from file sink modules [FLINK-31651] - Improve logging of granting/revoking leadership in JobMasterServiceLeadershipRunner to INFO level [FLINK-31692] - Integrate MongoDB connector docs into Flink website [FLINK-31703] - Update Flink docs for AWS v4.1.0 [FLINK-31764] - Get rid of numberOfRequestedOverdraftMemorySegments in LocalBufferPool [FLINK-31779] - Track stable branch of externalized connector instead of specific release tag [FLINK-31799] - Python connector download link should refer to the url defined in externalized repository [FLINK-31984] - Savepoint on S3 should be relocatable if entropy injection is not effective [FLINK-32024] - Short code related to externalized connector retrieve version from its own data yaml `}),e.add({id:73,href:"/2023/05/25/apache-flink-1.17.1-release-announcement/",title:"Apache Flink 1.17.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink 1.17 series.
+Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.16.2 Release Notes # Bug [FLINK-27246] - Code of method &quot;processElement(Lorg/apache/flink/streaming/runtime/streamrecord/StreamRecord;)V&quot; of class &quot;HashAggregateWithKeys$9211&quot; grows beyond 64 KB [FLINK-27800] - addInEdge check state error [FLINK-27848] - ZooKeeperLeaderElectionDriver keeps writing leader information, using up zxid [FLINK-28786] - Cannot run PyFlink 1.16 on MacOS with M1 chip [FLINK-29852] - Adaptive Scheduler duplicates operators for each parallel instance in the Web UI [FLINK-30461] - Some rocksdb sst files will remain forever [FLINK-30462] - DefaultMultipleComponentLeaderElectionService saves wrong leader session ID [FLINK-30477] - Not properly blocking retries when timeout occurs in AsyncWaitOperator [FLINK-30561] - ChangelogStreamHandleReaderWithCache cause FileNotFoundException [FLINK-30567] - Wrong insert overwrite behavior when the table contains uppercase character with Hive dialect [FLINK-30679] - Can not load the data of hive dim table when project-push-down is introduced [FLINK-30792] - clean up not uploaded state changes after materialization complete [FLINK-30803] - PyFlink mishandles script dependencies [FLINK-30864] - Optional pattern at the start of a group pattern not working [FLINK-30876] - Fix ResetTransformationProcessor don&#39;t reset the transformation of ExecNode in BatchExecMultiInput.rootNode [FLINK-30881] - Crictl/Minikube version mismatch causes errors in k8s setup [FLINK-30885] - Optional group pattern starts with non-optional looping pattern get wrong result on followed-by [FLINK-30917] - The user configured max parallelism does not take effect when using adaptive batch scheduler [FLINK-30989] - Configuration table.exec.spill-compression.block-size not take effect in batch job [FLINK-31017] - Early-started partial match timeout not yield completed matches [FLINK-31041] - Build up of pending global 
failures causes JM instability [FLINK-31042] - AfterMatchSkipStrategy not working on notFollowedBy ended pattern [FLINK-31043] - KeyError exception is thrown in CachedMapState [FLINK-31077] - Trigger checkpoint failed but it were shown as COMPLETED by rest API [FLINK-31083] - Python ProcessFunction with OutputTag cannot be reused [FLINK-31099] - Chained WindowOperator throws NPE in PyFlink ThreadMode [FLINK-31131] - The INITIALIZING of ExecutionState is missed in the state_machine doc [FLINK-31162] - Avoid setting private tokens to AM container context when kerberos delegation token fetch is disabled [FLINK-31182] - CompiledPlan cannot deserialize BridgingSqlFunction with MissingTypeStrategy [FLINK-31183] - Flink Kinesis EFO Consumer can fail to stop gracefully [FLINK-31185] - Python BroadcastProcessFunction not support side output [FLINK-31272] - Duplicate operators appear in the StreamGraph for Python DataStream API jobs [FLINK-31273] - Left join with IS_NULL filter be wrongly pushed down and get wrong join results [FLINK-31283] - Correct the description of building from source with scala version [FLINK-31286] - Python processes are still alive when shutting down a session cluster directly without stopping the jobs [FLINK-31293] - Request memory segment from LocalBufferPool may hanging forever. 
[FLINK-31305] - KafkaWriter doesn&#39;t wait for errors for in-flight records before completing flush [FLINK-31319] - Kafka new source partitionDiscoveryIntervalMs=0 cause bounded source can not quit [FLINK-31346] - Batch shuffle IO scheduler does not throw TimeoutException if numRequestedBuffers is greater than 0 [FLINK-31386] - Fix the potential deadlock issue of blocking shuffle [FLINK-31414] - exceptions in the alignment timer are ignored [FLINK-31437] - Wrong key &#39;lookup.cache.caching-missing-key&#39; in connector documentation [FLINK-31478] - TypeError: a bytes-like object is required, not &#39;JavaList&#39; is thrown when ds.execute_and_collect() is called on a KeyedStream [FLINK-31503] - &quot;org.apache.beam.sdk.options.PipelineOptionsRegistrar: Provider org.apache.beam.sdk.options.DefaultPipelineOptionsRegistrar not a subtype&quot; is thrown when executing Python UDFs in SQL Client [FLINK-31588] - The unaligned checkpoint type is wrong at subtask level [FLINK-31632] - watermark aligned idle source can&#39;t resume [FLINK-31652] - Flink should handle the delete event if the pod was deleted while pending [FLINK-31653] - Using\`if\` statement for a string subtype of the row type may meet npe in code generated by codegen [FLINK-31657] - ConfigurationInfo generates incorrect openapi schema [FLINK-31670] - ElasticSearch connector&#39;s document was not incorrect linked to external repo [FLINK-31683] - Align the outdated Chinese filesystem connector docs [FLINK-31690] - The current key is not set for KeyedCoProcessOperator [FLINK-31707] - Constant string cannot be used as input arguments of Pandas UDAF [FLINK-31743] - Avoid relocating the RocksDB&#39;s log failure when filename exceeds 255 characters [FLINK-31763] - Convert requested buffers to overdraft buffers when pool size is decreased [FLINK-31959] - Correct the unaligned checkpoint type at checkpoint level [FLINK-31963] - java.lang.ArrayIndexOutOfBoundsException when scaling down with unaligned 
checkpoints [FLINK-32010] - KubernetesLeaderRetrievalDriver always waits for lease update to resolve leadership [FLINK-32027] - Batch jobs could hang at shuffle phase when max parallelism is really large [FLINK-32029] - FutureUtils.handleUncaughtException swallows exceptions that are caused by the exception handler code Improvement [FLINK-25874] - PyFlink package dependencies conflict [FLINK-29729] - Fix credential info configured in flink-conf.yaml is lost during creating ParquetReader [FLINK-30962] - Improve error messaging when launching py4j gateway server [FLINK-31031] - Disable the output buffer of Python process to make it more convenient for interactive users [FLINK-31227] - Remove &#39;scala version&#39; from file sink modules [FLINK-31651] - Improve logging of granting/revoking leadership in JobMasterServiceLeadershipRunner to INFO level [FLINK-31692] - Integrate MongoDB connector docs into Flink website [FLINK-31703] - Update Flink docs for AWS v4.1.0 [FLINK-31764] - Get rid of numberOfRequestedOverdraftMemorySegments in LocalBufferPool [FLINK-31779] - Track stable branch of externalized connector instead of specific release tag [FLINK-31799] - Python connector download link should refer to the url defined in externalized repository [FLINK-31984] - Savepoint on S3 should be relocatable if entropy injection is not effective [FLINK-32024] - Short code related to externalized connector retrieve version from its own data yaml `}),e.add({id:74,href:"/2023/05/25/apache-flink-1.17.1-release-announcement/",title:"Apache Flink 1.17.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink 1.17 series.
 This release includes 75 bug fixes, vulnerability fixes, and minor improvements for Flink 1.17. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
 We highly recommend all users upgrade to Flink 1.17.1.
 Release Artifacts # Maven Dependencies # &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.17.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java&lt;/artifactId&gt; &lt;version&gt;1.17.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients&lt;/artifactId&gt; &lt;version&gt;1.17.1&lt;/version&gt; &lt;/dependency&gt; Binaries # You can find the binaries on the updated Downloads page.
-Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.17.1 Release Notes # Release Notes - Flink - Version 1.17.1 Bug [FLINK-28786] - Cannot run PyFlink 1.16 on MacOS with M1 chip [FLINK-30989] - Configuration table.exec.spill-compression.block-size not take effect in batch job [FLINK-31131] - The INITIALIZING of ExecutionState is missed in the state_machine doc [FLINK-31165] - Over Agg: The window rank function without order by error in top N query [FLINK-31273] - Left join with IS_NULL filter be wrongly pushed down and get wrong join results [FLINK-31293] - Request memory segment from LocalBufferPool may hanging forever. [FLINK-31305] - KafkaWriter doesn&#39;t wait for errors for in-flight records before completing flush [FLINK-31414] - exceptions in the alignment timer are ignored [FLINK-31424] - NullPointer when using StatementSet for multiple sinks [FLINK-31437] - Wrong key &#39;lookup.cache.caching-missing-key&#39; in connector documentation [FLINK-31478] - TypeError: a bytes-like object is required, not &#39;JavaList&#39; is thrown when ds.execute_and_collect() is called on a KeyedStream [FLINK-31503] - &quot;org.apache.beam.sdk.options.PipelineOptionsRegistrar: Provider org.apache.beam.sdk.options.DefaultPipelineOptionsRegistrar not a subtype&quot; is thrown when executing Python UDFs in SQL Client [FLINK-31541] - Get metrics in Flink WEB UI error [FLINK-31557] - Metric viewUpdater and reporter task in a SingleThreadScheduledExecutor lead to inaccurate PerSecond related metrics [FLINK-31588] - The unaligned checkpoint type is wrong at subtask level [FLINK-31612] - ClassNotFoundException when using GCS path as HA directory [FLINK-31626] - HsSubpartitionFileReaderImpl should recycle skipped read buffers. 
[FLINK-31628] - ArrayIndexOutOfBoundsException in watermark processing [FLINK-31632] - watermark aligned idle source can&#39;t resume [FLINK-31652] - Flink should handle the delete event if the pod was deleted while pending [FLINK-31653] - Using\`if\` statement for a string subtype of the row type may meet npe in code generated by codegen [FLINK-31657] - ConfigurationInfo generates incorrect openapi schema [FLINK-31670] - ElasticSearch connector&#39;s document was not incorrect linked to external repo [FLINK-31683] - Align the outdated Chinese filesystem connector docs [FLINK-31690] - The current key is not set for KeyedCoProcessOperator [FLINK-31707] - Constant string cannot be used as input arguments of Pandas UDAF [FLINK-31711] - OpenAPI spec omits complete-statement request body [FLINK-31733] - Model name clashes in OpenAPI spec [FLINK-31735] - JobDetailsInfo plan incorrectly documented as string [FLINK-31738] - FlameGraphTypeQueryParameter#Type clashes with java.reflect.Type in generated clients [FLINK-31743] - Avoid relocating the RocksDB&#39;s log failure when filename exceeds 255 characters [FLINK-31758] - Some external connectors sql client jar has a wrong download url in document [FLINK-31763] - Convert requested buffers to overdraft buffers when pool size is decreased [FLINK-31792] - Errors are not reported in the Web UI [FLINK-31818] - parsing error of &#39;security.kerberos.access.hadoopFileSystems&#39; in flink-conf.yaml [FLINK-31834] - Azure Warning: no space left on device [FLINK-31839] - Token delegation fails when both flink-s3-fs-hadoop and flink-s3-fs-presto plugins are used [FLINK-31882] - SqlGateway will throw exception when executing DeleteFromFilterOperation [FLINK-31959] - Correct the unaligned checkpoint type at checkpoint level [FLINK-31962] - libssl not found when running CI [FLINK-31963] - java.lang.ArrayIndexOutOfBoundsException when scaling down with unaligned checkpoints [FLINK-32010] - KubernetesLeaderRetrievalDriver always waits 
for lease update to resolve leadership [FLINK-32027] - Batch jobs could hang at shuffle phase when max parallelism is really large [FLINK-32029] - FutureUtils.handleUncaughtException swallows exceptions that are caused by the exception handler code Improvement [FLINK-29542] - Unload.md wrongly writes UNLOAD operation as LOAD operation [FLINK-31398] - Don&#39;t wrap with TemporaryClassLoaderContext in OperationExecutor [FLINK-31651] - Improve logging of granting/revoking leadership in JobMasterServiceLeadershipRunner to INFO level [FLINK-31656] - Obtain delegation tokens early to support external file system usage in blob server [FLINK-31692] - Integrate MongoDB connector docs into Flink website [FLINK-31702] - Integrate Opensearch connector docs into Flink docs v1.17/master [FLINK-31703] - Update Flink docs for AWS v4.1.0 [FLINK-31764] - Get rid of numberOfRequestedOverdraftMemorySegments in LocalBufferPool [FLINK-31779] - Track stable branch of externalized connector instead of specific release tag [FLINK-31799] - Python connector download link should refer to the url defined in externalized repository [FLINK-31984] - Savepoint on S3 should be relocatable if entropy injection is not effective [FLINK-32001] - SupportsRowLevelUpdate does not support returning only a part of the columns. [FLINK-32024] - Short code related to externalized connector retrieve version from its own data yaml [FLINK-32099] - create flink_data volume for operations playground [FLINK-32112] - Fix the deprecated state backend sample config in Chinese document Technical Debt [FLINK-31704] - Pulsar docs should be pulled from dedicated branch [FLINK-31705] - Remove Conjars `}),e.add({id:74,href:"/2023/05/17/apache-flink-kubernetes-operator-1.5.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.5.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink Kubernetes Operator 1.5.0! 
The release focuses on improvements to the job autoscaler that was introduced in the previous release and general operational hardening of the operator.
+Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.17.1 Release Notes # Release Notes - Flink - Version 1.17.1 Bug [FLINK-28786] - Cannot run PyFlink 1.16 on MacOS with M1 chip [FLINK-30989] - Configuration table.exec.spill-compression.block-size not take effect in batch job [FLINK-31131] - The INITIALIZING of ExecutionState is missed in the state_machine doc [FLINK-31165] - Over Agg: The window rank function without order by error in top N query [FLINK-31273] - Left join with IS_NULL filter be wrongly pushed down and get wrong join results [FLINK-31293] - Request memory segment from LocalBufferPool may hanging forever. [FLINK-31305] - KafkaWriter doesn&#39;t wait for errors for in-flight records before completing flush [FLINK-31414] - exceptions in the alignment timer are ignored [FLINK-31424] - NullPointer when using StatementSet for multiple sinks [FLINK-31437] - Wrong key &#39;lookup.cache.caching-missing-key&#39; in connector documentation [FLINK-31478] - TypeError: a bytes-like object is required, not &#39;JavaList&#39; is thrown when ds.execute_and_collect() is called on a KeyedStream [FLINK-31503] - &quot;org.apache.beam.sdk.options.PipelineOptionsRegistrar: Provider org.apache.beam.sdk.options.DefaultPipelineOptionsRegistrar not a subtype&quot; is thrown when executing Python UDFs in SQL Client [FLINK-31541] - Get metrics in Flink WEB UI error [FLINK-31557] - Metric viewUpdater and reporter task in a SingleThreadScheduledExecutor lead to inaccurate PerSecond related metrics [FLINK-31588] - The unaligned checkpoint type is wrong at subtask level [FLINK-31612] - ClassNotFoundException when using GCS path as HA directory [FLINK-31626] - HsSubpartitionFileReaderImpl should recycle skipped read buffers. 
[FLINK-31628] - ArrayIndexOutOfBoundsException in watermark processing [FLINK-31632] - watermark aligned idle source can&#39;t resume [FLINK-31652] - Flink should handle the delete event if the pod was deleted while pending [FLINK-31653] - Using\`if\` statement for a string subtype of the row type may meet npe in code generated by codegen [FLINK-31657] - ConfigurationInfo generates incorrect openapi schema [FLINK-31670] - ElasticSearch connector&#39;s document was not incorrect linked to external repo [FLINK-31683] - Align the outdated Chinese filesystem connector docs [FLINK-31690] - The current key is not set for KeyedCoProcessOperator [FLINK-31707] - Constant string cannot be used as input arguments of Pandas UDAF [FLINK-31711] - OpenAPI spec omits complete-statement request body [FLINK-31733] - Model name clashes in OpenAPI spec [FLINK-31735] - JobDetailsInfo plan incorrectly documented as string [FLINK-31738] - FlameGraphTypeQueryParameter#Type clashes with java.reflect.Type in generated clients [FLINK-31743] - Avoid relocating the RocksDB&#39;s log failure when filename exceeds 255 characters [FLINK-31758] - Some external connectors sql client jar has a wrong download url in document [FLINK-31763] - Convert requested buffers to overdraft buffers when pool size is decreased [FLINK-31792] - Errors are not reported in the Web UI [FLINK-31818] - parsing error of &#39;security.kerberos.access.hadoopFileSystems&#39; in flink-conf.yaml [FLINK-31834] - Azure Warning: no space left on device [FLINK-31839] - Token delegation fails when both flink-s3-fs-hadoop and flink-s3-fs-presto plugins are used [FLINK-31882] - SqlGateway will throw exception when executing DeleteFromFilterOperation [FLINK-31959] - Correct the unaligned checkpoint type at checkpoint level [FLINK-31962] - libssl not found when running CI [FLINK-31963] - java.lang.ArrayIndexOutOfBoundsException when scaling down with unaligned checkpoints [FLINK-32010] - KubernetesLeaderRetrievalDriver always waits 
for lease update to resolve leadership [FLINK-32027] - Batch jobs could hang at shuffle phase when max parallelism is really large [FLINK-32029] - FutureUtils.handleUncaughtException swallows exceptions that are caused by the exception handler code Improvement [FLINK-29542] - Unload.md wrongly writes UNLOAD operation as LOAD operation [FLINK-31398] - Don&#39;t wrap with TemporaryClassLoaderContext in OperationExecutor [FLINK-31651] - Improve logging of granting/revoking leadership in JobMasterServiceLeadershipRunner to INFO level [FLINK-31656] - Obtain delegation tokens early to support external file system usage in blob server [FLINK-31692] - Integrate MongoDB connector docs into Flink website [FLINK-31702] - Integrate Opensearch connector docs into Flink docs v1.17/master [FLINK-31703] - Update Flink docs for AWS v4.1.0 [FLINK-31764] - Get rid of numberOfRequestedOverdraftMemorySegments in LocalBufferPool [FLINK-31779] - Track stable branch of externalized connector instead of specific release tag [FLINK-31799] - Python connector download link should refer to the url defined in externalized repository [FLINK-31984] - Savepoint on S3 should be relocatable if entropy injection is not effective [FLINK-32001] - SupportsRowLevelUpdate does not support returning only a part of the columns. [FLINK-32024] - Short code related to externalized connector retrieve version from its own data yaml [FLINK-32099] - create flink_data volume for operations playground [FLINK-32112] - Fix the deprecated state backend sample config in Chinese document Technical Debt [FLINK-31704] - Pulsar docs should be pulled from dedicated branch [FLINK-31705] - Remove Conjars `}),e.add({id:75,href:"/2023/05/17/apache-flink-kubernetes-operator-1.5.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.5.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink Kubernetes Operator 1.5.0! 
The release focuses on improvements to the job autoscaler that was introduced in the previous release and general operational hardening of the operator.
 We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA! We hope you like the new release and we’d be eager to learn about your experience with it.
 Autoscaler improvements # Algorithm improvements and better scale down behaviour # The release contains important improvements to the core autoscaling logic. This includes improved stability of scaling decisions (leading to less parallelism oscillations) and better handling of slow or idle streams.
 There are also some fixes related to output ratio computation and propagation that greatly improves the autoscaler on more complex streaming pipelines.
@@ -1169,7 +1179,7 @@
 $ helm repo add flink-kubernetes-operator-1.5.0 https://archive.apache.org/dist/flink/flink-kubernetes-operator-1.5.0/ $ helm install flink-kubernetes-operator flink-kubernetes-operator-1.5.0/flink-kubernetes-operator --set webhook.create=false You can also find official Kubernetes Operator Docker images of the new version on Dockerhub.
 For more details, check the updated documentation and the release notes. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA.
 List of Contributors # Gyula Fora, Marton Balassi, Mate Czagany, Maximilian Michels, Rafał Boniecki, Rodrigo Meneses, Tamir Sagi, Xin Hao, Xin Li, Zhanghao Chen, Zhenqiu Huang, Daren Wong, Gaurav Miglani, Peter Vary, Tan Kim, yangjf2019
-`}),e.add({id:75,href:"/2023/05/12/howto-test-a-batch-source-with-the-new-source-framework/",title:"Howto test a batch source with the new Source framework",section:"Flink Blog",content:` Introduction # The Flink community has designed a new Source framework based on FLIP-27 lately. This article is the continuation of the howto create a batch source with the new Source framework article . Now it is time to test the created source ! As the previous article, this one was built while implementing the Flink batch source for Cassandra.
+`}),e.add({id:76,href:"/2023/05/12/howto-test-a-batch-source-with-the-new-source-framework/",title:"Howto test a batch source with the new Source framework",section:"Flink Blog",content:` Introduction # The Flink community has designed a new Source framework based on FLIP-27 lately. This article is the continuation of the howto create a batch source with the new Source framework article . Now it is time to test the created source ! As the previous article, this one was built while implementing the Flink batch source for Cassandra.
 Unit testing the source # Testing the serializers # Example Cassandra SplitSerializer and SplitEnumeratorStateSerializer
 In the previous article, we created serializers for Split and SplitEnumeratorState. We should now test them in unit tests. To test serde we create an object, serialize it using the serializer and then deserialize it using the same serializer and finally assert on the equality of the two objects. Thus, hascode() and equals() need to be implemented for the serialized objects.
 Other unit tests # Of course, we also need to unit test low level processing such as query building for example or any processing that does not require a running backend.
@@ -1191,7 +1201,7 @@
 @TestContext TestContextFactory contextFactory = new TestContextFactory(testEnvironment); TestContext implements DataStreamSourceExternalContext:
 We don&rsquo;t connect to the backend at each test case, so the shared resources such as session are created by the backend test environment (test suite scoped). They are then passed to the test context by constructor. It is also in the constructor that we initialize test case backend resources such as test case table. close() : drop the created test case resources getProducedType(): specify the test output type of the source such as a test Pojo for example getConnectorJarPaths(): provide a list of attached jars. Most of the time, we can return an empty list as maven already adds the jars to the test classpath createSource(): here we create the source as a user would have done. It will be provided to the test cases by the Flink test framework createSourceSplitDataWriter(): here we create an ExternalSystemSplitDataWriter responsible for writing test data which comes as a list of produced type objects such as defined in getProducedType() generateTestData(): produce the list of test data that will be given to the ExternalSystemSplitDataWriter. We must make sure that equals() returns false when 2 records of this list belong to different splits. The easier for that is to include the split id into the produced type and make sure equals() and hashcode() are properly implemented to include this field. Contributing the source to Flink # Lately, the Flink community has externalized all the connectors to external repositories that are sub-repositories of the official Apache Flink repository. This is mainly to decouple the release of Flink to the release of the connectors. To distribute the created source, we need to follow this official wiki page .
 Conclusion # This concludes the series of articles about creating a batch source with the new Flink framework. This was needed as, apart from the javadocs, the documentation about testing is missing for now. I hope you enjoyed reading and I hope the Flink community will receive a source PR from you soon :)
-`}),e.add({id:76,href:"/2023/05/09/howto-migrate-a-real-life-batch-pipeline-from-the-dataset-api-to-the-datastream-api/",title:"Howto migrate a real-life batch pipeline from the DataSet API to the DataStream API",section:"Flink Blog",content:` Introduction # The Flink community has been deprecating the DataSet API since version 1.12 as part of the work on FLIP-131: Consolidate the user-facing Dataflow SDKs/APIs (and deprecate the DataSet API) . This blog article illustrates the migration of a real-life batch DataSet pipeline to a batch DataStream pipeline. All the code presented in this article is available in the tpcds-benchmark-flink repo. The use case shown here is extracted from a broader work comparing Flink performances of different APIs by implementing TPCDS queries using these APIs.
+`}),e.add({id:77,href:"/2023/05/09/howto-migrate-a-real-life-batch-pipeline-from-the-dataset-api-to-the-datastream-api/",title:"Howto migrate a real-life batch pipeline from the DataSet API to the DataStream API",section:"Flink Blog",content:` Introduction # The Flink community has been deprecating the DataSet API since version 1.12 as part of the work on FLIP-131: Consolidate the user-facing Dataflow SDKs/APIs (and deprecate the DataSet API) . This blog article illustrates the migration of a real-life batch DataSet pipeline to a batch DataStream pipeline. All the code presented in this article is available in the tpcds-benchmark-flink repo. The use case shown here is extracted from a broader work comparing Flink performances of different APIs by implementing TPCDS queries using these APIs.
 What is TPCDS? # TPC-DS is a decision support benchmark that models several generally applicable aspects of a decision support system. The purpose of TPCDS benchmarks is to provide relevant, objective performance data of Big Data engines to industry users.
 Chosen TPCDS query # The chosen query for this article is Query3 because it contains all the more common analytics operators (filter, join, aggregation, group by, order by, limit). It represents an analytic query on store sales. Its SQL code is presented here:
 SELECT dt.d_year, item.i_brand_id brand_id, item.i_brand brand,SUM(ss_ext_sales_price) sum_agg FROM date_dim dt, store_sales, item WHERE dt.d_date_sk = store_sales.ss_sold_date_sk AND store_sales.ss_item_sk = item.i_item_sk AND item.i_manufact_id = 128 AND dt.d_moy=11 GROUP BY dt.d_year, item.i_brand, item.i_brand_id ORDER BY dt.d_year, sum_agg desc, brand_id LIMIT 100
@@ -1212,7 +1222,7 @@
 Migrating the limit operation # As all the elements of the DataStream were keyed by the same &ldquo;0&rdquo; key, they are kept in the same &quot; group&quot;. So we can implement the SQL LIMIT with a ProcessFunction with a counter that will output only the first 100 elements.
 Migrating the sink operation # As with sources, there were big changes in sinks with recent versions of Flink. We now use the Sink interface that requires an Encoder . But the resulting code is very similar to the one using the DataSet API. It&rsquo;s only that Encoder#encode() method writes bytes when TextOutputFormat.TextFormatter#format() wrote Strings.
 Conclusion # As you saw for the migration of the join operation, the new unified DataStream API has some limitations left in batch mode. In addition, the order by and limit resulting code is quite manual and requires the help of the Flink state API for the migration. For all these reasons, the Flink community recommends to use Flink SQL for batch pipelines. It results in much simpler code, good performance and out-of-the-box analytics capabilities. You could find the equivalent Query3 code that uses the Flink SQL/Table API in the Query3ViaFlinkSQLCSV class .
-`}),e.add({id:77,href:"/2023/05/03/howto-create-a-batch-source-with-the-new-source-framework/",title:"Howto create a batch source with the new Source framework",section:"Flink Blog",content:` Introduction # The Flink community has designed a new Source framework based on FLIP-27 lately. Some connectors have migrated to this new framework. This article is a how-to for creating a batch source using this new framework. It was built while implementing the Flink batch source for Cassandra. If you are interested in contributing or migrating connectors, this blog post is for you!
+`}),e.add({id:78,href:"/2023/05/03/howto-create-a-batch-source-with-the-new-source-framework/",title:"Howto create a batch source with the new Source framework",section:"Flink Blog",content:` Introduction # The Flink community has designed a new Source framework based on FLIP-27 lately. Some connectors have migrated to this new framework. This article is a how-to for creating a batch source using this new framework. It was built while implementing the Flink batch source for Cassandra. If you are interested in contributing or migrating connectors, this blog post is for you!
 Implementing the source components # The source architecture is depicted in the diagrams below:
 Source # Example Cassandra Source
 The source interface only does the &ldquo;glue&rdquo; between all the other components. Its role is to instantiate all of them and to define the source Boundedness . We also do the source configuration here along with user configuration validation.
@@ -1242,7 +1252,7 @@
 The initial stream size depends on the size of a split.
 Testing the source # For the sake of concision of this article, testing the source will be the object of the next article. Stay tuned !
 Conclusion # This article gathering the implementation field feedback was needed as the javadocs cannot cover all the implementation details for high-performance and maintainable sources. I hope you enjoyed reading and that it gave you the desire to contribute a new connector to the Flink project !
-`}),e.add({id:78,href:"/2023/04/19/apache-flink-ml-2.2.0-release-announcement/",title:"Apache Flink ML 2.2.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink ML 2.2.0! This release focuses on enriching Flink ML&rsquo;s feature engineering algorithms. The library now includes 33 feature engineering algorithms, making it a more comprehensive library for feature engineering tasks.
+`}),e.add({id:79,href:"/2023/04/19/apache-flink-ml-2.2.0-release-announcement/",title:"Apache Flink ML 2.2.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink ML 2.2.0! This release focuses on enriching Flink ML&rsquo;s feature engineering algorithms. The library now includes 33 feature engineering algorithms, making it a more comprehensive library for feature engineering tasks.
 With the addition of these algorithms, we believe Flink ML library is ready for use in production jobs that require feature engineering capabilities, whose input can then be consumed by both offline and online machine learning tasks.
 We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA! We hope you like the new release and we’d be eager to learn about your experience with it.
 Notable Features # Introduced API and infrastructure for online serving # In machine learning, one of the main goals of model training is to deploy the trained model to perform online inference, where the model server must respond to incoming requests with millisecond-level latency. However, prior releases of Flink ML only supported nearline inference using the Flink runtime, which may not meet the requirements of online inference use-cases.
@@ -1259,7 +1269,7 @@
 The binary distribution and source artifacts are now available on the updated Downloads page of the Flink website, and the most recent distribution of Flink ML Python package is available on PyPI.
 List of Contributors # The Apache Flink community would like to thank each one of the contributors that have made this release possible:
 Zhipeng Zhang, Dong Lin, Fan Hong, JiangXin, Zsombor Chikan, huangxingbo, taosiyuan163, vacaly, weibozhao, yunfengzhou-hub
-`}),e.add({id:79,href:"/2023/03/23/announcing-the-release-of-apache-flink-1.17/",title:"Announcing the Release of Apache Flink 1.17",section:"Flink Blog",content:`The Apache Flink PMC is pleased to announce Apache Flink release 1.17.0. Apache Flink is the leading stream processing standard, and the concept of unified stream and batch data processing is being successfully adopted in more and more companies. Thanks to our excellent community and contributors, Apache Flink continues to grow as a technology and remains one of the most active projects in the Apache Software Foundation. Flink 1.17 had 172 contributors enthusiastically participating and saw the completion of 7 FLIPs and 600+ issues, bringing many exciting new features and improvements to the community.
+`}),e.add({id:80,href:"/2023/03/23/announcing-the-release-of-apache-flink-1.17/",title:"Announcing the Release of Apache Flink 1.17",section:"Flink Blog",content:`The Apache Flink PMC is pleased to announce Apache Flink release 1.17.0. Apache Flink is the leading stream processing standard, and the concept of unified stream and batch data processing is being successfully adopted in more and more companies. Thanks to our excellent community and contributors, Apache Flink continues to grow as a technology and remains one of the most active projects in the Apache Software Foundation. Flink 1.17 had 172 contributors enthusiastically participating and saw the completion of 7 FLIPs and 600+ issues, bringing many exciting new features and improvements to the community.
 Towards Streaming Warehouses # In order to achieve greater efficiency in the realm of streaming warehouse, Flink 1.17 contains substantial improvements to both the performance of batch processing and the semantics of streaming processing. These improvements represent a significant stride towards the creation of a more efficient and streamlined data warehouse, capable of processing large quantities of data in real-time.
 For batch processing, this release includes several new features and improvements:
 Streaming Warehouse API: FLIP-282 introduces the new Delete and Update API in Flink SQL which only works in batch mode. External storage systems like Flink Table Store can implement row-level modification via this new API. The ALTER TABLE syntax is enhanced by including the ability to ADD/MODIFY/DROP columns, primary keys, and watermarks, making it easier for users to maintain their table schema. Batch Execution Improvements: Execution of batch workloads has been significantly improved in Flink 1.17 in terms of performance, stability and usability. Performance wise, a 26% TPC-DS improvement on 10T dataset is achieved with strategy and operator optimizations, such as new join reordering and adaptive local hash aggregation, Hive aggregate functions improvements, and the Hybrid Shuffle Mode enhancements. Stability wise, Speculative Execution now supports all operators, and the Adaptive Batch Scheduler is more robust against data skew. Usability wise, the tuning effort required for batch workloads has been reduced. The is now the default scheduler in batch mode. The Hybrid Shuffle Mode is compatible with Speculative Execution and the Adaptive Batch Scheduler, next to various configuration simplifications. SQL Client/Gateway: Apache Flink 1.17 introduces the &ldquo;gateway mode&rdquo; for SQL Client, allowing users to submit SQL queries to a SQL Gateway for enhanced functionality. Users can use SQL statements to manage job lifecycles, including displaying job information and stopping running jobs. This provides a powerful tool for managing Flink jobs. For stream processing, the following features and improvements are realized:
@@ -1298,11 +1308,11 @@
 Upgrade Notes # The Flink community tries to ensure that upgrades are as seamless as possible. However, certain changes may require users to make adjustments to certain parts of the program when upgrading to version 1.17. Please refer to the release notes for a comprehensive list of adjustments to make and issues to check during the upgrading process.
 List of Contributors # The Apache Flink community would like to express gratitude to all the contributors who made this release possible:
 Ahmed Hamdy, Aitozi, Aleksandr Pilipenko, Alexander Fedulov, Alexander Preuß, Anton Kalashnikov, Arvid Heise, Bo Cui, Brayno, Carlos Castro, ChangZhuo Chen (陳昌倬), Chen Qin, Chesnay Schepler, Clemens, ConradJam, Danny Cranmer, Dawid Wysakowicz, Dian Fu, Dong Lin, Dongjoon Hyun, Elphas Toringepi, Eric Xiao, Fabian Paul, Ferenc Csaky, Gabor Somogyi, Gen Luo, Gunnar Morling, Gyula Fora, Hangxiang Yu, Hong Liang Teoh, HuangXingBo, Jacky Lau, Jane Chan, Jark Wu, Jiale, Jin, Jing Ge, Jinzhong Li, Joao Boto, John Roesler, Jun He, JunRuiLee, Junrui Lee, Juntao Hu, Krzysztof Chmielewski, Leonard Xu, Licho, Lijie Wang, Mark Canlas, Martijn Visser, MartijnVisser, Martin Liu, Marton Balassi, Mason Chen, Matt, Matthias Pohl, Maximilian Michels, Mingliang Liu, Mulavar, Nico Kruber, Noah, Paul Lin, Peter Huang, Piotr Nowojski, Qing Lim, QingWei, Qingsheng Ren, Rakesh, Ran Tao, Robert Metzger, Roc Marshal, Roman Khachatryan, Ron, Rui Fan, Ryan Skraba, Salva Alcántara, Samrat, Samrat Deb, Samrat002, Sebastian Mattheis, Sergey Nuyanzin, Seth Saperstein, Shengkai, Shuiqiang Chen, Smirnov Alexander, Sriram Ganesh, Steven van Rossum, Tartarus0zm, Timo Walther, Venkata krishnan Sowrirajan, Wei Zhong, Weihua Hu, Weijie Guo, Xianxun Ye, Xintong Song, Yash Mayya, YasuoStudyJava, Yu Chen, Yubin Li, Yufan Sheng, Yun Gao, Yun Tang, Yuxin Tan, Zakelly, Zhanghao Chen, Zhenqiu Huang, Zhu Zhu, ZmmBigdata, bzhaoopenstack, chengshuo.cs, chenxujun, chenyuzhi, chenyuzhi459, chenzihao, dependabot[bot], fanrui, fengli, frankeshi, fredia, godfreyhe, gongzhongqiang, harker2015, hehuiyuan, hiscat, huangxingbo, hunter-cloud09, ifndef-SleePy, jeremyber-aws, jiangjiguang, jingge, kevin.cyj, kristoffSC, kurt, laughingman7743, libowen, lincoln lee, lincoln.lil, liujiangang, liujingmao, liuyongvs, liuzhuang2017, luoyuxia, mas-chen, moqimoqidea, muggleChen, noelo, ouyangwulin, ramkrish86, saikikun, sammieliu, shihong90, shuiqiangchen, snuyanzin, sunxia, sxnan, tison, todd5167, tonyzhu918, wangfeifan, 
wenbingshen, xuyang, yiksanchan, yunfengzhou-hub, yunhong, yuxia Luo, yuzelin, zhangjingcun, zhangmang, zhengyunhong.zyh, zhouli, zoucao, 沈嘉琦
-`}),e.add({id:80,href:"/2023/03/15/apache-flink-1.15.4-release-announcement/",title:"Apache Flink 1.15.4 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the fourth bug fix release of the Flink 1.15 series.
+`}),e.add({id:81,href:"/2023/03/15/apache-flink-1.15.4-release-announcement/",title:"Apache Flink 1.15.4 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the fourth bug fix release of the Flink 1.15 series.
 This release includes 53 bug fixes, vulnerability fixes, and minor improvements for Flink 1.15. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
 We highly recommend all users upgrade to Flink 1.15.4.
 Release Artifacts # Maven Dependencies # &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.15.4&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java&lt;/artifactId&gt; &lt;version&gt;1.15.4&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients&lt;/artifactId&gt; &lt;version&gt;1.15.4&lt;/version&gt; &lt;/dependency&gt; Binaries # You can find the binaries on the updated Downloads page.
-Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.15.4 Release Notes # Bug [FLINK-27341] - TaskManager running together with JobManager are bind to 127.0.0.1 [FLINK-27800] - addInEdge check state error [FLINK-27944] - IO metrics collision happens if a task has union inputs [FLINK-28526] - Fail to lateral join with UDTF from Table with timstamp column [FLINK-28695] - Fail to send partition request to restarted taskmanager [FLINK-28742] - Table.to_pandas fails with lit(&quot;xxx&quot;) [FLINK-28863] - Snapshot result of RocksDB native savepoint should have empty shared-state [FLINK-29231] - PyFlink UDAF produces different results in the same sliding window [FLINK-29234] - Dead lock in DefaultLeaderElectionService [FLINK-30133] - HadoopModuleFactory creates error if the security module cannot be loaded [FLINK-30168] - PyFlink Deserialization Error with Object Array [FLINK-30304] - Possible Deadlock in Kinesis/Firehose/DynamoDB Connector [FLINK-30308] - ClassCastException: class java.io.ObjectStreamClass$Caches$1 cannot be cast to class java.util.Map is showing in the logging when the job shutdown [FLINK-30366] - Python Group Agg failed in cleaning the idle state [FLINK-30461] - Some rocksdb sst files will remain forever [FLINK-30637] - In linux-aarch64 environment, using “is” judgment to match the window type of overwindow have returned incorrect matching results [FLINK-30679] - Can not load the data of hive dim table when project-push-down is introduced [FLINK-30803] - PyFlink mishandles script dependencies [FLINK-30864] - Optional pattern at the start of a group pattern not working [FLINK-30885] - Optional group pattern starts with non-optional looping pattern get wrong result on followed-by [FLINK-31041] - Build up of pending global failures causes JM instability [FLINK-31043] - KeyError exception is thrown in CachedMapState [FLINK-31183] - Flink Kinesis EFO Consumer can fail to stop gracefully [FLINK-31272] - 
Duplicate operators appear in the StreamGraph for Python DataStream API jobs [FLINK-31283] - Correct the description of building from source with scala version [FLINK-31286] - Python processes are still alive when shutting down a session cluster directly without stopping the jobs Improvement [FLINK-27327] - Add description about changing max parallelism explicitly leads to state incompatibility [FLINK-29155] - Improve default config of grpcServer in Process Mode [FLINK-29639] - Add ResourceId in TransportException for debugging [FLINK-29729] - Fix credential info configured in flink-conf.yaml is lost during creating ParquetReader [FLINK-29966] - Replace and redesign the Python api documentation base [FLINK-30633] - Update AWS SDKv2 to v2.19.14 [FLINK-30724] - Update doc of kafka per-partition watermark to FLIP-27 source [FLINK-30962] - Improve error messaging when launching py4j gateway server [FLINK-31031] - Disable the output buffer of Python process to make it more convenient for interactive users Sub-task [FLINK-30462] - DefaultMultipleComponentLeaderElectionService saves wrong leader session ID `}),e.add({id:81,href:"/2023/02/27/apache-flink-kubernetes-operator-1.4.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.4.0 Release Announcement",section:"Flink Blog",content:`We are proud to announce the latest stable release of the operator. In addition to the expected stability improvements and fixes, the 1.4.0 release introduces the first version of the long-awaited autoscaler module.
+Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.15.4 Release Notes # Bug [FLINK-27341] - TaskManager running together with JobManager are bind to 127.0.0.1 [FLINK-27800] - addInEdge check state error [FLINK-27944] - IO metrics collision happens if a task has union inputs [FLINK-28526] - Fail to lateral join with UDTF from Table with timstamp column [FLINK-28695] - Fail to send partition request to restarted taskmanager [FLINK-28742] - Table.to_pandas fails with lit(&quot;xxx&quot;) [FLINK-28863] - Snapshot result of RocksDB native savepoint should have empty shared-state [FLINK-29231] - PyFlink UDAF produces different results in the same sliding window [FLINK-29234] - Dead lock in DefaultLeaderElectionService [FLINK-30133] - HadoopModuleFactory creates error if the security module cannot be loaded [FLINK-30168] - PyFlink Deserialization Error with Object Array [FLINK-30304] - Possible Deadlock in Kinesis/Firehose/DynamoDB Connector [FLINK-30308] - ClassCastException: class java.io.ObjectStreamClass$Caches$1 cannot be cast to class java.util.Map is showing in the logging when the job shutdown [FLINK-30366] - Python Group Agg failed in cleaning the idle state [FLINK-30461] - Some rocksdb sst files will remain forever [FLINK-30637] - In linux-aarch64 environment, using “is” judgment to match the window type of overwindow have returned incorrect matching results [FLINK-30679] - Can not load the data of hive dim table when project-push-down is introduced [FLINK-30803] - PyFlink mishandles script dependencies [FLINK-30864] - Optional pattern at the start of a group pattern not working [FLINK-30885] - Optional group pattern starts with non-optional looping pattern get wrong result on followed-by [FLINK-31041] - Build up of pending global failures causes JM instability [FLINK-31043] - KeyError exception is thrown in CachedMapState [FLINK-31183] - Flink Kinesis EFO Consumer can fail to stop gracefully [FLINK-31272] - 
Duplicate operators appear in the StreamGraph for Python DataStream API jobs [FLINK-31283] - Correct the description of building from source with scala version [FLINK-31286] - Python processes are still alive when shutting down a session cluster directly without stopping the jobs Improvement [FLINK-27327] - Add description about changing max parallelism explicitly leads to state incompatibility [FLINK-29155] - Improve default config of grpcServer in Process Mode [FLINK-29639] - Add ResourceId in TransportException for debugging [FLINK-29729] - Fix credential info configured in flink-conf.yaml is lost during creating ParquetReader [FLINK-29966] - Replace and redesign the Python api documentation base [FLINK-30633] - Update AWS SDKv2 to v2.19.14 [FLINK-30724] - Update doc of kafka per-partition watermark to FLIP-27 source [FLINK-30962] - Improve error messaging when launching py4j gateway server [FLINK-31031] - Disable the output buffer of Python process to make it more convenient for interactive users Sub-task [FLINK-30462] - DefaultMultipleComponentLeaderElectionService saves wrong leader session ID `}),e.add({id:82,href:"/2023/02/27/apache-flink-kubernetes-operator-1.4.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.4.0 Release Announcement",section:"Flink Blog",content:`We are proud to announce the latest stable release of the operator. In addition to the expected stability improvements and fixes, the 1.4.0 release introduces the first version of the long-awaited autoscaler module.
 Flink Streaming Job Autoscaler # A highly requested feature for Flink applications is the ability to scale the pipeline based on incoming data load and the utilization of the dataflow. While Flink has already provided some of the required building blocks, this feature has not yet been realized in the open source ecosystem.
 With FLIP-271 the community set out to build such an autoscaler component as part of the Kubernetes Operator subproject. The Kubernetes Operator proved to be a great place for the autoscaler module as it already contains all the necessary bits for managing and upgrading production streaming applications.
 Fast-forward to the 1.4.0 release, we now have the first fully functional autoscaler implementation in the operator, ready to be tested and used in production applications. For more, detailed information, please refer to the Autoscaler Documentation.
@@ -1321,14 +1331,14 @@
 $ helm repo add flink-kubernetes-operator-1.4.0 https://archive.apache.org/dist/flink/flink-kubernetes-operator-1.4.0/ $ helm install flink-kubernetes-operator flink-kubernetes-operator-1.4.0/flink-kubernetes-operator --set webhook.create=false You can also find official Kubernetes Operator Docker images of the new version on Dockerhub.
 For more details, check the updated documentation and the release notes. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA.
 List of Contributors # Anton Ippolitov, FabioWanner, Gabor Somogyi, Gyula Fora, James Busche, Kyle Ahn, Matyas Orhidi, Maximilian Michels, Mohemmad Zaid Khan, Márton Balassi, Navaneesh Kumar, Ottomata, Peter Huang, Rodrigo, Shang Yuanchun, Shipeng Xie, Swathi Chandrashekar, Tony Garrard, Usamah Jassat, Vincent Chenal, Zsombor Chikan, Peter Vary
-`}),e.add({id:82,href:"/2023/01/30/apache-flink-1.16.1-release-announcement/",title:"Apache Flink 1.16.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink 1.16 series.
+`}),e.add({id:83,href:"/2023/01/30/apache-flink-1.16.1-release-announcement/",title:"Apache Flink 1.16.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink 1.16 series.
 This release includes 84 bug fixes, vulnerability fixes, and minor improvements for Flink 1.16. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
 We highly recommend all users upgrade to Flink 1.16.1.
 Release Artifacts # Maven Dependencies # &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.16.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java&lt;/artifactId&gt; &lt;version&gt;1.16.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients&lt;/artifactId&gt; &lt;version&gt;1.16.1&lt;/version&gt; &lt;/dependency&gt; Binaries # You can find the binaries on the updated Downloads page.
 Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.16.1 Upgrade Notes # FLINK-28988 - Incorrect result for filter after temporal join The filter will not be pushed down into both inputs of the event time temporal join. This may cause incompatible plan changes compared to Flink 1.16.0, e.g., when the left input is an upsert source (like upsert-kafka connector), the query plan will remove the ChangelogNormalize node in Flink 1.16.1, while it did appear in 1.16.0.
 FLINK-29849 - Event time temporal join on an upsert source may produce incorrect execution plan This resolves the correctness issue when doing an event time temporal join with a versioned table backed by an upsert source. When the right input of the join is an upsert source, it no longer generates a ChangelogNormalize node for it. This is an incompatible plan change compared to 1.16.0
 FLINK-30383 - UseLogicalIdentifier makes datadog consider metric as custom The Datadog reporter now adds a &ldquo;flink.&rdquo; prefix to metric identifiers if &ldquo;useLogicalIdentifier&rdquo; is enabled. This is required for these metrics to be recognized as Flink metrics, not custom ones.
-Release Notes # Bug [FLINK-16582] - NettyBufferPoolTest may have warns on NettyBuffer leak [FLINK-26037] - TaskManagerRunner may crash during shutdown sequence [FLINK-26890] - DynamoDB consumer error consuming partitions close to retention [FLINK-27341] - TaskManager running together with JobManager are bind to 127.0.0.1 [FLINK-27944] - IO metrics collision happens if a task has union inputs [FLINK-28102] - Flink AkkaRpcSystemLoader fails when temporary directory is a symlink [FLINK-28526] - Fail to lateral join with UDTF from Table with timstamp column [FLINK-28695] - Fail to send partition request to restarted taskmanager [FLINK-28742] - Table.to_pandas fails with lit(&quot;xxx&quot;) [FLINK-28786] - Cannot run PyFlink 1.16 on MacOS with M1 chip [FLINK-28863] - Snapshot result of RocksDB native savepoint should have empty shared-state [FLINK-28960] - Pulsar throws java.lang.NoClassDefFoundError: javax/xml/bind/annotation/XmlElement [FLINK-28988] - Incorrect result for filter after temporal join [FLINK-29231] - PyFlink UDAF produces different results in the same sliding window [FLINK-29234] - Dead lock in DefaultLeaderElectionService [FLINK-29298] - LocalBufferPool request buffer from NetworkBufferPool hanging [FLINK-29479] - Support whether using system PythonPath for PyFlink jobs [FLINK-29539] - dnsPolicy in FlinkPod is not overridable [FLINK-29615] - MetricStore does not remove metrics of nonexistent subtasks when adaptive scheduler lowers job parallelism [FLINK-29627] - Sink - Duplicate key exception during recover more than 1 committable. 
[FLINK-29677] - Prevent dropping the current catalog [FLINK-29728] - TablePlanner prevents Flink from starting is working directory is a symbolic link [FLINK-29749] - flink info command support dynamic properties [FLINK-29781] - ChangelogNormalize uses wrong keys after transformation by WatermarkAssignerChangelogNormalizeTransposeRule [FLINK-29803] - Table API Scala APIs lack proper source jars [FLINK-29817] - Published metadata for apache-flink in pypi are inconsistent and causes poetry to fail [FLINK-29827] - [Connector][AsyncSinkWriter] Checkpointed states block writer from sending records [FLINK-29839] - HiveServer2 endpoint doesn&#39;t support TGetInfoType value &#39;CLI_ODBC_KEYWORDS&#39; [FLINK-29849] - Event time temporal join on an upsert source may produce incorrect execution plan [FLINK-29857] - Fix failure to connect to &#39;HiveServer2Endpoint&#39; when using hive3 beeline [FLINK-29899] - Stacktrace printing in DefaultExecutionGraphCacheTest is confusing maven test log output [FLINK-29923] - Hybrid Shuffle may face deadlock when running a task need to execute big size data [FLINK-29927] - AkkaUtils#getAddress may cause memory leak [FLINK-30030] - Unexpected behavior for overwrite in Hive dialect [FLINK-30133] - HadoopModuleFactory creates error if the security module cannot be loaded [FLINK-30168] - PyFlink Deserialization Error with Object Array [FLINK-30189] - HsSubpartitionFileReader may load data that has been consumed from memory [FLINK-30239] - The flame graph doesn&#39;t work due to groupExecutionsByLocation has bug [FLINK-30304] - Possible Deadlock in Kinesis/Firehose/DynamoDB Connector [FLINK-30308] - ClassCastException: class java.io.ObjectStreamClass$Caches$1 cannot be cast to class java.util.Map is showing in the logging when the job shutdown [FLINK-30334] - SourceCoordinator error splitRequest check cause HybridSource loss of data and hang [FLINK-30359] - Encountered NoClassDefFoundError when using flink-sql-connector-elasticsearch6 
[FLINK-30366] - Python Group Agg failed in cleaning the idle state [FLINK-30525] - Cannot open jobmanager configuration web page [FLINK-30558] - The metric &#39;numRestarts&#39; reported in SchedulerBase will be overridden by metric &#39;fullRestarts&#39; [FLINK-30637] - In linux-aarch64 environment, using “is” judgment to match the window type of overwindow have returned incorrect matching results Improvement [FLINK-27327] - Add description about changing max parallelism explicitly leads to state incompatibility [FLINK-29134] - fetch metrics may cause oom(ThreadPool task pile up) [FLINK-29155] - Improve default config of grpcServer in Process Mode [FLINK-29244] - Add metric lastMaterializationDuration to ChangelogMaterializationMetricGroup [FLINK-29458] - When two tables have the same field, do not specify the table name,Exception will be thrown:SqlValidatorException :Column &#39;currency&#39; is ambiguous [FLINK-29639] - Add ResourceId in TransportException for debugging [FLINK-29693] - MiniClusterExtension should respect DEFAULT_PARALLELISM if set [FLINK-29834] - Clear static Jackson TypeFactory cache on CL release [FLINK-29966] - Replace and redesign the Python api documentation base [FLINK-30016] - Update Flink 1.16 release notes about updated oshi-core [FLINK-30116] - Don&#39;t Show Env Vars in Web UI [FLINK-30183] - We should add a proper error message in case the deprecated reflection-based instantiation of a reporter is triggered [FLINK-30357] - Wrong link in connector/jdbc doc. 
[FLINK-30436] - Integrate Opensearch connector docs into Flink docs v1.16 [FLINK-30592] - The unsupported hive version is not deleted on the hive overview document [FLINK-30633] - Update AWS SDKv2 to v2.19.14 [FLINK-30724] - Update doc of kafka per-partition watermark to FLIP-27 source Technical Debt [FLINK-27731] - Remove Hugo Modules integration [FLINK-29157] - Clarify the contract between CompletedCheckpointStore and SharedStateRegistry [FLINK-29957] - Rework connector docs integration [FLINK-29958] - Add new connector_artifact shortcode [FLINK-29972] - Pin Flink docs to Elasticsearch Connector 3.0.0 [FLINK-29973] - connector_artifact should append Flink minor version [FLINK-30291] - Integrate flink-connector-aws into Flink docs [FLINK-30382] - Flink 1.16 to integrate KDS/KDF docs from flink-connector-aws [FLINK-30383] - UseLogicalIdentifier makes datadog consider metric as custom `}),e.add({id:83,href:"/2023/01/20/delegation-token-framework-obtain-distribute-and-use-temporary-credentials-automatically/",title:"Delegation Token Framework: Obtain, Distribute and Use Temporary Credentials Automatically",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce that the upcoming minor version of Flink (1.17) includes the Delegation Token Framework proposed in FLIP-272. This enables Flink to authenticate to external services at a central location (JobManager) and distribute authentication tokens to the TaskManagers.
+Release Notes # Bug [FLINK-16582] - NettyBufferPoolTest may have warns on NettyBuffer leak [FLINK-26037] - TaskManagerRunner may crash during shutdown sequence [FLINK-26890] - DynamoDB consumer error consuming partitions close to retention [FLINK-27341] - TaskManager running together with JobManager are bind to 127.0.0.1 [FLINK-27944] - IO metrics collision happens if a task has union inputs [FLINK-28102] - Flink AkkaRpcSystemLoader fails when temporary directory is a symlink [FLINK-28526] - Fail to lateral join with UDTF from Table with timstamp column [FLINK-28695] - Fail to send partition request to restarted taskmanager [FLINK-28742] - Table.to_pandas fails with lit(&quot;xxx&quot;) [FLINK-28786] - Cannot run PyFlink 1.16 on MacOS with M1 chip [FLINK-28863] - Snapshot result of RocksDB native savepoint should have empty shared-state [FLINK-28960] - Pulsar throws java.lang.NoClassDefFoundError: javax/xml/bind/annotation/XmlElement [FLINK-28988] - Incorrect result for filter after temporal join [FLINK-29231] - PyFlink UDAF produces different results in the same sliding window [FLINK-29234] - Dead lock in DefaultLeaderElectionService [FLINK-29298] - LocalBufferPool request buffer from NetworkBufferPool hanging [FLINK-29479] - Support whether using system PythonPath for PyFlink jobs [FLINK-29539] - dnsPolicy in FlinkPod is not overridable [FLINK-29615] - MetricStore does not remove metrics of nonexistent subtasks when adaptive scheduler lowers job parallelism [FLINK-29627] - Sink - Duplicate key exception during recover more than 1 committable. 
[FLINK-29677] - Prevent dropping the current catalog [FLINK-29728] - TablePlanner prevents Flink from starting is working directory is a symbolic link [FLINK-29749] - flink info command support dynamic properties [FLINK-29781] - ChangelogNormalize uses wrong keys after transformation by WatermarkAssignerChangelogNormalizeTransposeRule [FLINK-29803] - Table API Scala APIs lack proper source jars [FLINK-29817] - Published metadata for apache-flink in pypi are inconsistent and causes poetry to fail [FLINK-29827] - [Connector][AsyncSinkWriter] Checkpointed states block writer from sending records [FLINK-29839] - HiveServer2 endpoint doesn&#39;t support TGetInfoType value &#39;CLI_ODBC_KEYWORDS&#39; [FLINK-29849] - Event time temporal join on an upsert source may produce incorrect execution plan [FLINK-29857] - Fix failure to connect to &#39;HiveServer2Endpoint&#39; when using hive3 beeline [FLINK-29899] - Stacktrace printing in DefaultExecutionGraphCacheTest is confusing maven test log output [FLINK-29923] - Hybrid Shuffle may face deadlock when running a task need to execute big size data [FLINK-29927] - AkkaUtils#getAddress may cause memory leak [FLINK-30030] - Unexpected behavior for overwrite in Hive dialect [FLINK-30133] - HadoopModuleFactory creates error if the security module cannot be loaded [FLINK-30168] - PyFlink Deserialization Error with Object Array [FLINK-30189] - HsSubpartitionFileReader may load data that has been consumed from memory [FLINK-30239] - The flame graph doesn&#39;t work due to groupExecutionsByLocation has bug [FLINK-30304] - Possible Deadlock in Kinesis/Firehose/DynamoDB Connector [FLINK-30308] - ClassCastException: class java.io.ObjectStreamClass$Caches$1 cannot be cast to class java.util.Map is showing in the logging when the job shutdown [FLINK-30334] - SourceCoordinator error splitRequest check cause HybridSource loss of data and hang [FLINK-30359] - Encountered NoClassDefFoundError when using flink-sql-connector-elasticsearch6 
[FLINK-30366] - Python Group Agg failed in cleaning the idle state [FLINK-30525] - Cannot open jobmanager configuration web page [FLINK-30558] - The metric &#39;numRestarts&#39; reported in SchedulerBase will be overridden by metric &#39;fullRestarts&#39; [FLINK-30637] - In linux-aarch64 environment, using “is” judgment to match the window type of overwindow have returned incorrect matching results Improvement [FLINK-27327] - Add description about changing max parallelism explicitly leads to state incompatibility [FLINK-29134] - fetch metrics may cause oom(ThreadPool task pile up) [FLINK-29155] - Improve default config of grpcServer in Process Mode [FLINK-29244] - Add metric lastMaterializationDuration to ChangelogMaterializationMetricGroup [FLINK-29458] - When two tables have the same field, do not specify the table name,Exception will be thrown:SqlValidatorException :Column &#39;currency&#39; is ambiguous [FLINK-29639] - Add ResourceId in TransportException for debugging [FLINK-29693] - MiniClusterExtension should respect DEFAULT_PARALLELISM if set [FLINK-29834] - Clear static Jackson TypeFactory cache on CL release [FLINK-29966] - Replace and redesign the Python api documentation base [FLINK-30016] - Update Flink 1.16 release notes about updated oshi-core [FLINK-30116] - Don&#39;t Show Env Vars in Web UI [FLINK-30183] - We should add a proper error message in case the deprecated reflection-based instantiation of a reporter is triggered [FLINK-30357] - Wrong link in connector/jdbc doc. 
[FLINK-30436] - Integrate Opensearch connector docs into Flink docs v1.16 [FLINK-30592] - The unsupported hive version is not deleted on the hive overview document [FLINK-30633] - Update AWS SDKv2 to v2.19.14 [FLINK-30724] - Update doc of kafka per-partition watermark to FLIP-27 source Technical Debt [FLINK-27731] - Remove Hugo Modules integration [FLINK-29157] - Clarify the contract between CompletedCheckpointStore and SharedStateRegistry [FLINK-29957] - Rework connector docs integration [FLINK-29958] - Add new connector_artifact shortcode [FLINK-29972] - Pin Flink docs to Elasticsearch Connector 3.0.0 [FLINK-29973] - connector_artifact should append Flink minor version [FLINK-30291] - Integrate flink-connector-aws into Flink docs [FLINK-30382] - Flink 1.16 to integrate KDS/KDF docs from flink-connector-aws [FLINK-30383] - UseLogicalIdentifier makes datadog consider metric as custom `}),e.add({id:84,href:"/2023/01/20/delegation-token-framework-obtain-distribute-and-use-temporary-credentials-automatically/",title:"Delegation Token Framework: Obtain, Distribute and Use Temporary Credentials Automatically",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce that the upcoming minor version of Flink (1.17) includes the Delegation Token Framework proposed in FLIP-272. This enables Flink to authenticate to external services at a central location (JobManager) and distribute authentication tokens to the TaskManagers.
 Introduction # Authentication in distributed systems is not an easy task. Previously all worker nodes (TaskManagers) reading from or writing to an external system needed to authenticate on their own. In such a case several things can go wrong, including but not limited to:
 Too many authentication requests (potentially resulting in rejected requests) Large number of retries on authentication failures Re-occurring propagation/update of temporary credentials in a timely manner Dependency issues when external system libraries are having the same dependency with different versions Each authentication/temporary credentials are different making standardization challenging &hellip; The aim of Delegation Token Framework is to solve the above challenges. The framework is authentication protocol agnostic and pluggable. The primary design concept is that authentication happens only at a single location (JobManager), the obtained temporary credentials propagated automatically to all the task managers where they can be used. The token re-obtain process is also handled in the JobManager.
 New authentication providers can be added with small amount of code which is going to be loaded by Flink automatically. At the moment the following external systems are supported:
@@ -1341,7 +1351,7 @@
 An example tutorial can be found here on external system authentication.
 Summary # The Delegation Token Framework is feature complete on the master branch and is becoming generally available on the release of Flink 1.17. The framework obtains authentication tokens at a central location and propagates them to all workers on a re-occurring basis.
 Any connector to an external system which supports authentication can be a potential user of this framework. To support authentication in your connector we encourage you to implement your own DelegationTokenProvider/DelegationTokenReceiver pair.
-`}),e.add({id:84,href:"/2023/01/13/apache-flink-table-store-0.3.0-release-announcement/",title:"Apache Flink Table Store 0.3.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is pleased to announce the release of the Apache Flink Table Store (0.3.0).
+`}),e.add({id:85,href:"/2023/01/13/apache-flink-table-store-0.3.0-release-announcement/",title:"Apache Flink Table Store 0.3.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is pleased to announce the release of the Apache Flink Table Store (0.3.0).
 We highly recommend all users upgrade to Flink Table Store 0.3.0. 0.3.0 completed 150+ issues, which were completed by nearly 30 contributors.
 Please check out the full documentation for detailed information and user guides.
 Flink Table Store 0.3 completes many exciting features, enhances its ability as a data lake storage and greatly improves the availability of its stream pipeline. Some important features are described below.
@@ -1375,14 +1385,14 @@
 Provides Flink decoupled independent Java APIs Spark: enhance batch write, provide streaming write and streaming read Flink: complete DDL &amp; DML, providing more management operations Changelog producer: Lookup, the delay of stream reading each scenario is less than one minute Provide multi table consistent materialized views in real-time Data Integration: Schema Evolution integration, whole database integration. Please give the release a try, share your feedback on the Flink mailing list and contribute to the project!
 List of Contributors # The Apache Flink community would like to thank every one of the contributors that have made this release possible:
 Feng Wang, Hannankan, Jane Chan, Jia Liu, Jingsong Lee, Jonathan Leitschuh, JunZhang, Kirill Listopad, Liwei Li, MOBIN-F, Nicholas Jiang, Wang Luning, WencongLiu, Yubin Li, gongzhongqiang, houhang1005, liuzhuang2017, openinx, tsreaper, wuyouwuyoulian, zhuangchong, zjureel (shammon), 吴祥平
-`}),e.add({id:85,href:"/2023/01/10/apache-flink-kubernetes-operator-1.3.1-release-announcement/",title:"Apache Flink Kubernetes Operator 1.3.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink Kubernetes Operator 1.3 series.
+`}),e.add({id:86,href:"/2023/01/10/apache-flink-kubernetes-operator-1.3.1-release-announcement/",title:"Apache Flink Kubernetes Operator 1.3.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink Kubernetes Operator 1.3 series.
 The release contains fixes for several critical issues and some major stability improvements for the application upgrade mechanism.
 We highly recommend all users to upgrade to Flink Kubernetes Operator 1.3.1.
 Release Notes # Bug # [FLINK-30329] - flink-kubernetes-operator helm chart does not work with dynamic config because of use of volumeMount subPath [FLINK-30361] - Cluster deleted and created back while updating replicas [FLINK-30406] - Jobmanager Deployment error without HA metadata should not lead to unrecoverable error [FLINK-30437] - State incompatibility issue might cause state loss [FLINK-30527] - Last-state suspend followed by flinkVersion change may lead to state loss [FLINK-30528] - Job may be stuck in upgrade loop when last-state fallback is disabled and deployment is missing Improvement # [FLINK-28875] - Add FlinkSessionJobControllerTest [FLINK-30408] - Add unit test for HA metadata check logic Release Resources # The source artifacts and helm chart are available on the Downloads page of the Flink website. You can easily try out the new features shipped in the official 1.3.1 release by adding the Helm chart to your own local registry:
 $ helm repo add flink-kubernetes-operator-1.3.1 https://archive.apache.org/dist/flink/flink-kubernetes-operator-1.3.1/ $ helm install flink-kubernetes-operator flink-kubernetes-operator-1.3.1/flink-kubernetes-operator --set webhook.create=false You can also find official Kubernetes Operator Docker images of the new version on Dockerhub.
 For more details, check the updated documentation and the release notes. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA.
 List of Contributors # Gyula Fora, Andrew Otto, Swathi Chandrashekar, Peter Vary
-`}),e.add({id:86,href:"/2022/12/14/apache-flink-kubernetes-operator-1.3.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.3.0 Release Announcement",section:"Flink Blog",content:`The Flink community is happy to announce that the latest Flink Kubernetes Operator version went live today. Beyond the regular operator improvements and fixes the 1.3.0 version also integrates better with some popular infrastructure management tools like OLM and Argo CD. These improvements are clear indicators that the original intentions of the Flink community, namely to provide the de facto standard solution for managing Flink applications on Kubernetes is making steady progress to becoming a reality.
+`}),e.add({id:87,href:"/2022/12/14/apache-flink-kubernetes-operator-1.3.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.3.0 Release Announcement",section:"Flink Blog",content:`The Flink community is happy to announce that the latest Flink Kubernetes Operator version went live today. Beyond the regular operator improvements and fixes the 1.3.0 version also integrates better with some popular infrastructure management tools like OLM and Argo CD. These improvements are clear indicators that the original intentions of the Flink community, namely to provide the de facto standard solution for managing Flink applications on Kubernetes is making steady progress to becoming a reality.
 Release Highlights # Upgrade to Fabric8 6.x.x and JOSDK 4.x.x Restart unhealthy Flink clusters Contribute the Flink Kubernetes Operator to OperatorHub Publish flink-kubernetes-operator-api module separately Upgrade to Fabric8 6.x.x and JOSDK 4.x.x # Two important framework components were upgraded with the current operator release, the Fabric8 client to v6.2.0 and the JOSDK to v4.1.0. These upgrades among others contain important informer improvements that help lower or completely eliminate the occurrence of certain intermittent issues when the operator looses track of managed Custom Resources.
 With the new JOSDK version, the operator now supports leader election and allows users to run standby operator replicas to reduce downtime due to operator failures. Read more about this in the docs.
 Restart unhealthy Flink clusters # Flink has its own restart strategies which are working fine in most of the cases, but there are certain circumstances when Flink can be stuck in restart loops often resulting in OutOfMemoryError: Metaspace type of state which the job cannot recover from. If the root cause is just a temporary outage of some external system, for example, the Flink job could be resurrected by simply performing a full restart on the application.
@@ -1395,7 +1405,7 @@
 $ helm repo add flink-kubernetes-operator-1.3.0 https://archive.apache.org/dist/flink/flink-kubernetes-operator-1.3.0/ $ helm install flink-kubernetes-operator flink-kubernetes-operator-1.3.0/flink-kubernetes-operator --set webhook.create=false You can also find official Kubernetes Operator Docker images of the new version on Dockerhub.
 For more details, check the updated documentation and the release notes. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA.
 List of Contributors # Chesnay Schepler, Clara Xiong, Denis Nuțiu, Gabor Somogyi, Gyula Fora, James Busche, Jeesmon Jacob, Marton Balassi, Matyas Orhidi, Maximilian Michels, Sriram Ganesh, Steven Zhang, Thomas Weise, Tony Garrard, Usamah Jassat, Xin Hao, Yaroslav Tkachenko, Zezae Oh, Zhenqiu Huang, Zhiming, clarax, darenwkt, jiangzho, judy.zhu, pvary, ted chang, tison, yangjf2019, zhou-jiang
-`}),e.add({id:87,href:"/2022/11/25/optimising-the-throughput-of-async-sinks-using-a-custom-ratelimitingstrategy/",title:"Optimising the throughput of async sinks using a custom RateLimitingStrategy",section:"Flink Blog",content:` Introduction # When designing a Flink data processing job, one of the key concerns is maximising job throughput. Sink throughput is a crucial factor because it can determine the entire job’s throughput. We generally want the highest possible write rate in the sink without overloading the destination. However, since the factors impacting a destination’s performance are variable over the job’s lifetime, the sink needs to adjust its write rate dynamically. Depending on the sink’s destination, it helps to tune the write rate using a different RateLimitingStrategy.
+`}),e.add({id:88,href:"/2022/11/25/optimising-the-throughput-of-async-sinks-using-a-custom-ratelimitingstrategy/",title:"Optimising the throughput of async sinks using a custom RateLimitingStrategy",section:"Flink Blog",content:` Introduction # When designing a Flink data processing job, one of the key concerns is maximising job throughput. Sink throughput is a crucial factor because it can determine the entire job’s throughput. We generally want the highest possible write rate in the sink without overloading the destination. However, since the factors impacting a destination’s performance are variable over the job’s lifetime, the sink needs to adjust its write rate dynamically. Depending on the sink’s destination, it helps to tune the write rate using a different RateLimitingStrategy.
 This post explains how you can optimise sink throughput by configuring a custom RateLimitingStrategy on a connector that builds on the AsyncSinkBase (FLIP-171). In the sections below, we cover the design logic behind the AsyncSinkBase and the RateLimitingStrategy, then we take you through two example implementations of rate limiting strategies, specifically the CongestionControlRateLimitingStrategy and TokenBucketRateLimitingStrategy.
 Background of the AsyncSinkBase # When implementing the AsyncSinkBase, our goal was to simplify building new async sinks to custom destinations by providing common async sink functionality used with at least once processing. This has allowed users to more easily write sinks to custom destinations, such as Amazon Kinesis Data Streams and Amazon Kinesis Firehose. An additional async sink to Amazon DynamoDB (FLIP-252) is also being developed at the time of writing.
 The AsyncSinkBase provides the core implementation which handles the mechanics of async requests and responses. This includes retrying failed messages, deciding when to flush records to the destination, and persisting un-flushed records to state during checkpointing. In order to increase throughput, the async sink also dynamically adjusts the request rate depending on the destination’s responses. Read more about this in our previous 1.15 release blog post or watch our FlinkForward talk recording explaining the design of the Async Sink.
@@ -1424,11 +1434,11 @@
 Specifying a custom RateLimitingStrategy # To specify a custom RateLimitingStrategy, we have to specify it in the AsyncSinkWriterConfiguration which is passed into the constructor of the AsyncSinkWriter. For example:
 class MyCustomSinkWriter&lt;InputT&gt; extends AsyncSinkWriter&lt;InputT, MyCustomRequestEntry&gt; { MyCustomSinkWriter( ElementConverter&lt;InputT, MyCustomRequestEntry&gt; elementConverter, Sink.InitContext context, Collection&lt;BufferedRequestState&lt;MyCustomRequestEntry&gt;&gt; states) { super( elementConverter, context, AsyncSinkWriterConfiguration.builder() // ... .setRateLimitingStrategy(new TokenBucketRateLimitingStrategy()) .build(), states); } } Summary # From Apache Flink 1.16 we can customise the RateLimitingStrategy used to dynamically adjust the behaviour of the Async Sink at runtime. This allows users to tune their connector implementations based on specific use cases and needs, without having to understand the base sink’s low-level workings.
 We hope this extension will be useful for you. If you have any feedback, feel free to reach out!
-`}),e.add({id:88,href:"/2022/11/10/apache-flink-1.15.3-release-announcement/",title:"Apache Flink 1.15.3 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the third bug fix release of the Flink 1.15 series.
+`}),e.add({id:89,href:"/2022/11/10/apache-flink-1.15.3-release-announcement/",title:"Apache Flink 1.15.3 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the third bug fix release of the Flink 1.15 series.
 This release includes 59 bug fixes, vulnerability fixes, and minor improvements for Flink 1.15. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
 We highly recommend all users upgrade to Flink 1.15.3.
 Release Artifacts # Maven Dependencies # &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.15.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java&lt;/artifactId&gt; &lt;version&gt;1.15.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients&lt;/artifactId&gt; &lt;version&gt;1.15.3&lt;/version&gt; &lt;/dependency&gt; Binaries # You can find the binaries on the updated Downloads page.
-Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.15.3 Release Notes # Bug [FLINK-26726] - Remove the unregistered task from readersAwaitingSplit [FLINK-26890] - DynamoDB consumer error consuming partitions close to retention [FLINK-27384] - In the Hive dimension table, when the data is changed on the original partition, the create_time configuration does not take effect [FLINK-27400] - Pulsar connector subscribed the system topic when using the regex [FLINK-27415] - Read empty csv file throws exception in FileSystem table connector [FLINK-27492] - Flink table scala example does not including the scala-api jars [FLINK-27579] - The param client.timeout can not be set by dynamic properties when stopping the job [FLINK-27611] - ConcurrentModificationException during Flink-Pulsar checkpoint notification [FLINK-27954] - JobVertexFlameGraphHandler does not work on standby Dispatcher [FLINK-28084] - Pulsar unordered reader should disable retry and delete reconsume logic. 
[FLINK-28265] - Inconsistency in Kubernetes HA service: broken state handle [FLINK-28488] - KafkaMetricWrapper does incorrect cast [FLINK-28609] - Flink-Pulsar connector fails on larger schemas [FLINK-28863] - Snapshot result of RocksDB native savepoint should have empty shared-state [FLINK-28934] - Pulsar Source put all the splits to only one parallelism when using Exclusive subscription [FLINK-28951] - Header in janino generated java files can merge with line numbers [FLINK-28959] - 504 gateway timeout when consume large number of topics using TopicPatten [FLINK-28960] - Pulsar throws java.lang.NoClassDefFoundError: javax/xml/bind/annotation/XmlElement [FLINK-28975] - withIdleness marks all streams from FLIP-27 sources as idle [FLINK-28976] - Changelog 1st materialization delayed unneccesarily [FLINK-29130] - Correct the doc description of state.backend.local-recovery [FLINK-29138] - Project pushdown not work for lookup source [FLINK-29205] - FlinkKinesisConsumer not respecting Credential Provider configuration for EFO [FLINK-29207] - Pulsar message eventTime may be incorrectly set to a negative number [FLINK-29253] - DefaultJobmanagerRunnerRegistry#localCleanupAsync calls close instead of closeAsync [FLINK-29324] - Calling Kinesis connector close method before subtask starts running results in NPE [FLINK-29325] - Fix documentation bug on how to enable batch mode for streaming examples [FLINK-29381] - Key_Shared subscription isn&#39;t works in the latest Pulsar connector [FLINK-29395] - [Kinesis][EFO] Issue using EFO consumer at timestamp with empty shard [FLINK-29397] - Race condition in StreamTask can lead to NPE if changelog is disabled [FLINK-29459] - Sink v2 has bugs in supporting legacy v1 implementations with global committer [FLINK-29477] - ClassCastException when collect primitive array to Python [FLINK-29479] - Support whether using system PythonPath for PyFlink jobs [FLINK-29483] - flink python udf arrow in thread model bug [FLINK-29500] - 
InitializeOnMaster uses wrong parallelism with AdaptiveScheduler [FLINK-29509] - Set correct subtaskId during recovery of committables [FLINK-29512] - Align SubtaskCommittableManager checkpointId with CheckpointCommittableManagerImpl checkpointId during recovery [FLINK-29539] - dnsPolicy in FlinkPod is not overridable [FLINK-29567] - Revert sink output metric names from numRecordsSend back to numRecordsOut [FLINK-29613] - Wrong message size assertion in Pulsar&#39;s batch message [FLINK-29627] - Sink - Duplicate key exception during recover more than 1 committable. [FLINK-29645] - BatchExecutionKeyedStateBackend is using incorrect ExecutionConfig when creating serializer [FLINK-29749] - flink info command support dynamic properties [FLINK-29803] - Table API Scala APIs lack proper source jars [FLINK-29827] - [Connector][AsyncSinkWriter] Checkpointed states block writer from sending records [FLINK-29927] - AkkaUtils#getAddress may cause memory leak Improvement [FLINK-24906] - Improve CSV format handling and support [FLINK-28733] - jobmanager.sh should support dynamic properties [FLINK-28909] - Add ribbon filter policy option in RocksDBConfiguredOptions [FLINK-29134] - fetch metrics may cause oom(ThreadPool task pile up) [FLINK-29158] - Fix logging in DefaultCompletedCheckpointStore [FLINK-29223] - Missing info output for when filtering JobGraphs based on their persisted JobResult [FLINK-29255] - FLIP-258 - Enforce binary compatibility in patch releases [FLINK-29476] - Kinesis Connector retry mechanism not applied to EOFException [FLINK-29503] - Add backpressureLevel field without hyphens [FLINK-29504] - Jar upload spec should define a schema `}),e.add({id:89,href:"/2022/10/28/announcing-the-release-of-apache-flink-1.16/",title:"Announcing the Release of Apache Flink 1.16",section:"Flink Blog",content:`Apache Flink continues to grow at a rapid pace and is one of the most active communities in Apache. 
Flink 1.16 had over 240 contributors enthusiastically participating, with 19 FLIPs and 1100+ issues completed, bringing a lot of exciting features to the community.
+Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.15.3 Release Notes # Bug [FLINK-26726] - Remove the unregistered task from readersAwaitingSplit [FLINK-26890] - DynamoDB consumer error consuming partitions close to retention [FLINK-27384] - In the Hive dimension table, when the data is changed on the original partition, the create_time configuration does not take effect [FLINK-27400] - Pulsar connector subscribed the system topic when using the regex [FLINK-27415] - Read empty csv file throws exception in FileSystem table connector [FLINK-27492] - Flink table scala example does not including the scala-api jars [FLINK-27579] - The param client.timeout can not be set by dynamic properties when stopping the job [FLINK-27611] - ConcurrentModificationException during Flink-Pulsar checkpoint notification [FLINK-27954] - JobVertexFlameGraphHandler does not work on standby Dispatcher [FLINK-28084] - Pulsar unordered reader should disable retry and delete reconsume logic. 
[FLINK-28265] - Inconsistency in Kubernetes HA service: broken state handle [FLINK-28488] - KafkaMetricWrapper does incorrect cast [FLINK-28609] - Flink-Pulsar connector fails on larger schemas [FLINK-28863] - Snapshot result of RocksDB native savepoint should have empty shared-state [FLINK-28934] - Pulsar Source put all the splits to only one parallelism when using Exclusive subscription [FLINK-28951] - Header in janino generated java files can merge with line numbers [FLINK-28959] - 504 gateway timeout when consume large number of topics using TopicPatten [FLINK-28960] - Pulsar throws java.lang.NoClassDefFoundError: javax/xml/bind/annotation/XmlElement [FLINK-28975] - withIdleness marks all streams from FLIP-27 sources as idle [FLINK-28976] - Changelog 1st materialization delayed unneccesarily [FLINK-29130] - Correct the doc description of state.backend.local-recovery [FLINK-29138] - Project pushdown not work for lookup source [FLINK-29205] - FlinkKinesisConsumer not respecting Credential Provider configuration for EFO [FLINK-29207] - Pulsar message eventTime may be incorrectly set to a negative number [FLINK-29253] - DefaultJobmanagerRunnerRegistry#localCleanupAsync calls close instead of closeAsync [FLINK-29324] - Calling Kinesis connector close method before subtask starts running results in NPE [FLINK-29325] - Fix documentation bug on how to enable batch mode for streaming examples [FLINK-29381] - Key_Shared subscription isn&#39;t works in the latest Pulsar connector [FLINK-29395] - [Kinesis][EFO] Issue using EFO consumer at timestamp with empty shard [FLINK-29397] - Race condition in StreamTask can lead to NPE if changelog is disabled [FLINK-29459] - Sink v2 has bugs in supporting legacy v1 implementations with global committer [FLINK-29477] - ClassCastException when collect primitive array to Python [FLINK-29479] - Support whether using system PythonPath for PyFlink jobs [FLINK-29483] - flink python udf arrow in thread model bug [FLINK-29500] - 
InitializeOnMaster uses wrong parallelism with AdaptiveScheduler [FLINK-29509] - Set correct subtaskId during recovery of committables [FLINK-29512] - Align SubtaskCommittableManager checkpointId with CheckpointCommittableManagerImpl checkpointId during recovery [FLINK-29539] - dnsPolicy in FlinkPod is not overridable [FLINK-29567] - Revert sink output metric names from numRecordsSend back to numRecordsOut [FLINK-29613] - Wrong message size assertion in Pulsar&#39;s batch message [FLINK-29627] - Sink - Duplicate key exception during recover more than 1 committable. [FLINK-29645] - BatchExecutionKeyedStateBackend is using incorrect ExecutionConfig when creating serializer [FLINK-29749] - flink info command support dynamic properties [FLINK-29803] - Table API Scala APIs lack proper source jars [FLINK-29827] - [Connector][AsyncSinkWriter] Checkpointed states block writer from sending records [FLINK-29927] - AkkaUtils#getAddress may cause memory leak Improvement [FLINK-24906] - Improve CSV format handling and support [FLINK-28733] - jobmanager.sh should support dynamic properties [FLINK-28909] - Add ribbon filter policy option in RocksDBConfiguredOptions [FLINK-29134] - fetch metrics may cause oom(ThreadPool task pile up) [FLINK-29158] - Fix logging in DefaultCompletedCheckpointStore [FLINK-29223] - Missing info output for when filtering JobGraphs based on their persisted JobResult [FLINK-29255] - FLIP-258 - Enforce binary compatibility in patch releases [FLINK-29476] - Kinesis Connector retry mechanism not applied to EOFException [FLINK-29503] - Add backpressureLevel field without hyphens [FLINK-29504] - Jar upload spec should define a schema `}),e.add({id:90,href:"/2022/10/28/announcing-the-release-of-apache-flink-1.16/",title:"Announcing the Release of Apache Flink 1.16",section:"Flink Blog",content:`Apache Flink continues to grow at a rapid pace and is one of the most active communities in Apache. 
Flink 1.16 had over 240 contributors enthusiastically participating, with 19 FLIPs and 1100+ issues completed, bringing a lot of exciting features to the community.
 Flink has become the leading role and factual standard of stream processing, and the concept of the unification of stream and batch data processing is gradually gaining recognition and is being successfully implemented in more and more companies. Previously, the integrated stream and batch concept placed more emphasis on a unified API and a unified computing framework. This year, based on this, Flink proposed the next development direction of Flink-Streaming Warehouse (Streamhouse), which further upgraded the scope of stream-batch integration: it truly completes not only the unified computation but also unified storage, thus realizing unified real-time analysis.
 In 1.16, the Flink community has completed many improvements for both batch and stream processing:
 For batch processing, all-round improvements in ease of use, stability and performance have been completed. 1.16 is a milestone version of Flink batch processing and an important step towards maturity. Ease of use: with the introduction of SQL Gateway and full compatibility with Hive Server2, users can submit Flink SQL jobs and Hive SQL jobs very easily, and it is also easy to connect to the original Hive ecosystem. Functionality: Join hints let Flink SQL users manually specify join strategies to avoid unreasonable execution plans. The compatibility of Hive SQL has reached 94%, and users can migrate from Hive to Flink at a very low cost. Stability: Propose a speculative execution mechanism to reduce the long tail sub-tasks of a job and improve the stability. Improve HashJoin and introduce failure rollback mechanism to avoid join failure. Performance: Introduce dynamic partition pruning to reduce the Scan I/O and improve join processing for the star-schema queries. There is 30% improvement in the TPC-DS benchmark. We can use hybrid shuffle mode to improve resource usage and processing performance. For stream processing, there are a number of significant improvements: Changelog State Backend provides users with second or even millisecond checkpoints to dramatically improve the fault tolerance experience, while providing a smaller end-to-end latency experience for transactional Sink jobs. Lookup join is widely used in stream processing. Slow lookup speed, low throughput and delay update are resolved through common cache mechanism, asynchronous io and retriable lookup. These features are very useful, solving the pain points that users often complain about, and supporting richer scenarios. From the first day of the birth of Flink SQL, there were some non-deterministic operations that could cause incorrect results or exceptions, which caused great distress to users. 
In 1.16, we spent a lot of effort to solve most of the problems, and we will continue to improve in the future. With the further refinement of the integration of stream and batch, and the continuous iteration of the Flink Table Store (0.2 has been released), the Flink community is pushing the Streaming warehouse from concept to reality and maturity step by step.
@@ -1473,11 +1483,11 @@
 Upgrade Notes # We aim to make upgrades as smooth as possible, but some of the changes require users to adjust some parts of the program when upgrading to Apache Flink 1.16. Please take a look at the release notes for a list of adjustments to make and issues to check during upgrades.
 List of Contributors # The Apache Flink community would like to thank each one of the contributors that have made this release possible:
 1996fanrui, Ada Wang, Ada Wong, Ahmed Hamdy, Aitozi, Alexander Fedulov, Alexander Preuß, Alexander Trushev, Andriy Redko, Anton Kalashnikov, Arvid Heise, Ben Augarten, Benchao Li, BiGsuw, Biao Geng, Bobby Richard, Brayno, CPS794, Cheng Pan, Chengkai Yang, Chesnay Schepler, Danny Cranmer, David N Perkins, Dawid Wysakowicz, Dian Fu, DingGeGe, EchoLee5, Etienne Chauchot, Fabian Paul, Ferenc Csaky, Francesco Guardiani, Gabor Somogyi, Gen Luo, Gyula Fora, Haizhou Zhao, Hangxiang Yu, Hao Wang, Hong Liang Teoh, Hong Teoh, Hongbo Miao, HuangXingBo, Ingo Bürk, Jacky Lau, Jane Chan, Jark Wu, Jay Li, Jia Liu, Jie Wang, Jin, Jing Ge, Jing Zhang, Jingsong Lee, Jinhu Wu, Joe Moser, Joey Pereira, Jun He, JunRuiLee, Juntao Hu, JustDoDT, Kai Chen, Krzysztof Chmielewski, Krzysztof Dziolak, Kyle Dong, LeoZhang, Levani Kokhreidze, Lihe Ma, Lijie Wang, Liu Jiangang, Luning Wang, Marios Trivyzas, Martijn Visser, MartijnVisser, Mason Chen, Matthias Pohl, Metehan Yıldırım, Michael, Mingde Peng, Mingliang Liu, Mulavar, Márton Balassi, Nie yingping, Niklas Semmler, Paul Lam, Paul Lin, Paul Zhang, PengYuan, Piotr Nowojski, Qingsheng Ren, Qishang Zhong, Ran Tao, Robert Metzger, Roc Marshal, Roman Boyko, Roman Khachatryan, Ron, Ron Cohen, Ruanshubin, Rudi Kershaw, Rufus Refactor, Ryan Skraba, Sebastian Mattheis, Sergey, Sergey Nuyanzin, Shengkai, Shubham Bansal, SmirAlex, Smirnov Alexander, SteNicholas, Steven van Rossum, Suhan Mao, Tan Yuxin, Tartarus0zm, TennyZhuang, Terry Wang, Thesharing, Thomas Weise, Timo Walther, Tom, Tony Wei, Weijie Guo, Wencong Liu, WencongLiu, Xintong Song, Xuyang, Yangze Guo, Yi Tang, Yu Chen, Yuan Huang, Yubin Li, Yufan Sheng, Yufei Zhang, Yun Gao, Yun Tang, Yuxin Tan, Zakelly, Zhanghao Chen, Zhu Zhu, Zichen Liu, Zili Sun, acquachen, bgeng777, billyrrr, bzhao, caoyu, chenlei677, chenzihao, chenzihao5, coderap, cphe, davidliu, dependabot[bot], dkkb, dusukang, empcl, eyys, fanrui, fengjiankun, fengli, fredia, gabor.g.somogyi, godfreyhe, gongzhongqiang, harker2015, 
hongli, huangxingbo, huweihua, jayce, jaydonzhou, jiabao.sun, kevin.cyj, kurt, lidefu, lijiewang.wlj, liliwei, lincoln lee, lincoln.lil, littleeleventhwolf, liufangqi, liujia10, liujiangang, liujingmao, liuyongvs, liuzhuang2017, longwang, lovewin99, luoyuxia, mans2singh, maosuhan, mayue.fight, mayuehappy, nieyingping, pengmide, pengmingde, polaris6, pvary, qinjunjerry, realdengziqi, root, shammon, shihong90, shuiqiangchen, slinkydeveloper, snailHumming, snuyanzin, suxinglee, sxnan, tison, trushev, tsreaper, unknown, wangfeifan, wangyang0918, wangzhiwu, wenbingshen, xiangqiao123, xuyang, yangjf2019, yangjunhan, yangsanity, yangxin, ylchou, yuchengxin, yunfengzhou-hub, yuxia Luo, yuzelin, zhangchaoming, zhangjingcun, zhangmang, zhangzhengqi3, zhaoweinan, zhengyunhong.zyh, zhenyu xing, zhouli, zhuanshenbsj1, zhuzhu.zz, zoucao, zp, 周磊, 饶紫轩,, 鲍健昕 愚鲤, 帝国阿三
-`}),e.add({id:90,href:"/2022/10/13/apache-flink-table-store-0.2.1-release-announcement/",title:"Apache Flink Table Store 0.2.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink Table Store 0.2 series.
+`}),e.add({id:91,href:"/2022/10/13/apache-flink-table-store-0.2.1-release-announcement/",title:"Apache Flink Table Store 0.2.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink Table Store 0.2 series.
 This release includes 13 bug fixes, vulnerability fixes, and minor improvements for Flink Table Store 0.2. Below you will find a list of all bugfixes and improvements. For a complete list of all changes see: JIRA.
 We highly recommend all users upgrade to Flink Table Store 0.2.1.
 Release Artifacts # Binaries # You can find the binaries on the updated Downloads page.
-Release Notes # Bug [FLINK-29098] - StoreWriteOperator#prepareCommit should let logSinkFunction flush first before fetching offset [FLINK-29241] - Can not overwrite from empty input [FLINK-29273] - Page not enough Exception in SortBufferMemTable [FLINK-29278] - BINARY type is not supported in table store [FLINK-29295] - Clear RecordWriter slower to avoid causing frequent compaction conflicts [FLINK-29367] - Avoid manifest corruption for incorrect checkpoint recovery [FLINK-29369] - Commit delete file failure due to Checkpoint aborted [FLINK-29385] - AddColumn in flink table store should check the duplicate field names [FLINK-29412] - Connection leak in orc reader Improvement [FLINK-29154] - Support LookupTableSource for table store [FLINK-29181] - log.system can be congiured by dynamic options [FLINK-29226] - Throw exception for streaming insert overwrite [FLINK-29276] - Flush all memory in SortBufferMemTable.clear `}),e.add({id:91,href:"/2022/10/07/apache-flink-kubernetes-operator-1.2.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.2.0 Release Announcement",section:"Flink Blog",content:`We are proud to announce the latest stable release of the operator. The 1.2.0 release adds support for the Standalone Kubernetes deployment mode and includes several improvements to the core logic.
+Release Notes # Bug [FLINK-29098] - StoreWriteOperator#prepareCommit should let logSinkFunction flush first before fetching offset [FLINK-29241] - Can not overwrite from empty input [FLINK-29273] - Page not enough Exception in SortBufferMemTable [FLINK-29278] - BINARY type is not supported in table store [FLINK-29295] - Clear RecordWriter slower to avoid causing frequent compaction conflicts [FLINK-29367] - Avoid manifest corruption for incorrect checkpoint recovery [FLINK-29369] - Commit delete file failure due to Checkpoint aborted [FLINK-29385] - AddColumn in flink table store should check the duplicate field names [FLINK-29412] - Connection leak in orc reader Improvement [FLINK-29154] - Support LookupTableSource for table store [FLINK-29181] - log.system can be congiured by dynamic options [FLINK-29226] - Throw exception for streaming insert overwrite [FLINK-29276] - Flush all memory in SortBufferMemTable.clear `}),e.add({id:92,href:"/2022/10/07/apache-flink-kubernetes-operator-1.2.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.2.0 Release Announcement",section:"Flink Blog",content:`We are proud to announce the latest stable release of the operator. The 1.2.0 release adds support for the Standalone Kubernetes deployment mode and includes several improvements to the core logic.
 Release Highlights # Standalone deployment mode support Improved upgrade flow Readiness and liveness probes Flexible job jar handling Standalone deployment mode support # Until now the operator relied exclusively on Flink’s built-in Native Kubernetes integration to deploy and manage Flink clusters. When using the Native deployment mode the Flink cluster communicates directly with Kubernetes to allocate/deallocate TaskManager resources on the fly. While this leads to a very simple deployment model, in some environments it also means higher security exposure as the user code running on the Flink cluster may gain the same Kubernetes access privileges.
 Flink Kubernetes Operator 1.2.0 brings Standalone mode support for FlinkDeployment resources.
 When using the standalone mode, the operator itself sets up the Job and TaskManager resources for the Flink cluster. Flink processes then run without any need for Kubernetes access. In fact in this mode the Flink cluster itself is unaware that it is running in a Kubernetes environment. If unknown or external code is being executed on the Flink cluster then Standalone mode adds another layer of security.
@@ -1492,11 +1502,11 @@
 Release Resources # The source artifacts and helm chart are now available on the updated Downloads page of the Flink website.
 $ helm repo add flink-kubernetes-operator-1.2.0 https://archive.apache.org/dist/flink/flink-kubernetes-operator-1.2.0/ $ helm install flink-kubernetes-operator flink-kubernetes-operator-1.2.0/flink-kubernetes-operator --set webhook.create=false You can also find official Kubernetes Operator Docker images of the new version on [Dockerhub](https://hub.docker.com/r/apache/flink-kubernetes-operator). For more details, check the updated documentation and the release notes. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA.
 List of Contributors # Aitozi, Avocadomaster, ConradJam, Dylan Meissner, Gabor Somogyi, Gaurav Miglani, Gyula Fora, Jeesmon Jacob, Joao Ubaldo, Marton Balassi, Matyas Orhidi, Maximilian Michels, Nicholas Jiang, Peter Huang, Robson Roberto Souza Peixoto, Thomas Weise, Tim, Usamah Jassat, Xin Hao, Yaroslav Tkachenko
-`}),e.add({id:92,href:"/2022/09/28/apache-flink-1.14.6-release-announcement/",title:"Apache Flink 1.14.6 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce another bug fix release for Flink 1.14.
+`}),e.add({id:93,href:"/2022/09/28/apache-flink-1.14.6-release-announcement/",title:"Apache Flink 1.14.6 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce another bug fix release for Flink 1.14.
 This release includes 34 bug fixes, vulnerability fixes and minor improvements for Flink 1.14. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
 We highly recommend all users to upgrade to Flink 1.14.6.
 Release Artifacts # Maven Dependencies # &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.14.6&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.14.6&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.14.6&lt;/version&gt; &lt;/dependency&gt; Binaries # You can find the binaries on the updated Downloads page.
-Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.14.6 Release Notes # Bug [FLINK-24862] - The user-defined hive udaf/udtf cannot be used normally in hive dialect [FLINK-25454] - Negative time in throughput calculator [FLINK-27041] - KafkaSource in batch mode failing if any topic partition is empty [FLINK-27399] - Pulsar connector didn&#39;t set start consuming position correctly [FLINK-27418] - Flink SQL TopN result is wrong [FLINK-27683] - Insert into (column1, column2) Values(.....) fails with SQL hints [FLINK-27762] - Kafka WakeupException during handling splits changes [FLINK-28019] - Error in RetractableTopNFunction when retracting a stale record with state ttl enabled [FLINK-28057] - LD_PRELOAD is hardcoded to x64 on flink-docker [FLINK-28357] - Watermark issue when recovering Finished sources [FLINK-28454] - Fix the wrong timestamp example of KafkaSource [FLINK-28609] - Flink-Pulsar connector fails on larger schemas [FLINK-28880] - Fix CEP doc with wrong result of strict contiguity of looping patterns [FLINK-28908] - Coder for LIST type is incorrectly chosen is PyFlink [FLINK-28978] - Kinesis connector doesn't work for new AWS regions [FLINK-29130] - Correct the doc description of state.backend.local-recovery [FLINK-29138] - Project pushdown not work for lookup source Improvement [FLINK-27865] - Add guide and example for configuring SASL and SSL in Kafka SQL connector document [FLINK-28094] - Upgrade AWS SDK to support ap-southeast-3 `}),e.add({id:93,href:"/2022/09/08/regarding-akkas-licensing-change/",title:"Regarding Akka's licensing change",section:"Flink Blog",content:`On September 7th Lightbend announced a license change for the Akka project, the TL;DR being that you will need a commercial license to use future versions of Akka (2.7+) in production if you exceed a certain revenue threshold.
+Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.14.6 Release Notes # Bug [FLINK-24862] - The user-defined hive udaf/udtf cannot be used normally in hive dialect [FLINK-25454] - Negative time in throughput calculator [FLINK-27041] - KafkaSource in batch mode failing if any topic partition is empty [FLINK-27399] - Pulsar connector didn&#39;t set start consuming position correctly [FLINK-27418] - Flink SQL TopN result is wrong [FLINK-27683] - Insert into (column1, column2) Values(.....) fails with SQL hints [FLINK-27762] - Kafka WakeupException during handling splits changes [FLINK-28019] - Error in RetractableTopNFunction when retracting a stale record with state ttl enabled [FLINK-28057] - LD_PRELOAD is hardcoded to x64 on flink-docker [FLINK-28357] - Watermark issue when recovering Finished sources [FLINK-28454] - Fix the wrong timestamp example of KafkaSource [FLINK-28609] - Flink-Pulsar connector fails on larger schemas [FLINK-28880] - Fix CEP doc with wrong result of strict contiguity of looping patterns [FLINK-28908] - Coder for LIST type is incorrectly chosen is PyFlink [FLINK-28978] - Kinesis connector doesn't work for new AWS regions [FLINK-29130] - Correct the doc description of state.backend.local-recovery [FLINK-29138] - Project pushdown not work for lookup source Improvement [FLINK-27865] - Add guide and example for configuring SASL and SSL in Kafka SQL connector document [FLINK-28094] - Upgrade AWS SDK to support ap-southeast-3 `}),e.add({id:94,href:"/2022/09/08/regarding-akkas-licensing-change/",title:"Regarding Akka's licensing change",section:"Flink Blog",content:`On September 7th Lightbend announced a license change for the Akka project, the TL;DR being that you will need a commercial license to use future versions of Akka (2.7+) in production if you exceed a certain revenue threshold.
 Within a few hours of the announcement several people reached out to the Flink project, worrying about the impact this has on Flink, as we use Akka internally.
 The purpose of this blogpost is to clarify our position on the matter.
 Please be aware that this topic is still quite fresh, and things are subject to change.
@@ -1517,7 +1527,7 @@
 How does Flink use Akka? # Akka is used in the coordination layer of Flink to
 exchange status messages between processes/components (e.g., JobManager and TaskManager), enforce certain guarantees w.r.t. multi-threading (i.e., only one thread can make changes to the internal state of a component) observe components for unexpected crashes (i.e., notice and handle TaskManager thread crashes). What this means is that we are using very few functionalities of Akka.
 Additionally, that we use Akka is an implementation detail that the vast majority of Flink code isn&rsquo;t aware of, meaning that we can replace it with something else without having to change Flink significantly.
-`}),e.add({id:94,href:"/2022/08/29/apache-flink-table-store-0.2.0-release-announcement/",title:"Apache Flink Table Store 0.2.0 Release Announcement",section:"Flink Blog",content:` The Apache Flink community is pleased to announce the release of the Apache Flink Table Store (0.2.0).
+`}),e.add({id:95,href:"/2022/08/29/apache-flink-table-store-0.2.0-release-announcement/",title:"Apache Flink Table Store 0.2.0 Release Announcement",section:"Flink Blog",content:` The Apache Flink community is pleased to announce the release of the Apache Flink Table Store (0.2.0).
 Please check out the full documentation for detailed information and user guides.
 What is Flink Table Store # Flink Table Store is a data lake storage for streaming updates/deletes changelog ingestion and high-performance queries in real time.
 As a new type of updatable data lake, Flink Table Store has the following features:
@@ -1533,12 +1543,12 @@
 We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA.
 List of Contributors # The Apache Flink community would like to thank every one of the contributors that have made this release possible:
 Jane Chan, Jia Liu, Jingsong Lee, liliwei, Nicholas Jiang, openinx, tsreaper
-`}),e.add({id:95,href:"/2022/08/24/apache-flink-1.15.2-release-announcement/",title:"Apache Flink 1.15.2 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the second bug fix release of the Flink 1.15 series.
+`}),e.add({id:96,href:"/2022/08/24/apache-flink-1.15.2-release-announcement/",title:"Apache Flink 1.15.2 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the second bug fix release of the Flink 1.15 series.
 This release includes 30 bug fixes, vulnerability fixes, and minor improvements for Flink 1.15. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
 We highly recommend all users upgrade to Flink 1.15.2.
 Release Artifacts # Maven Dependencies # &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.15.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java&lt;/artifactId&gt; &lt;version&gt;1.15.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients&lt;/artifactId&gt; &lt;version&gt;1.15.2&lt;/version&gt; &lt;/dependency&gt; Binaries # You can find the binaries on the updated Downloads page.
 Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.15.2 Upgrade Notes # For Table API: 1.15.0 and 1.15.1 generated non-deterministic UIDs for operators that make it difficult/impossible to restore state or upgrade to next patch version. A new table.exec.uid.generation config option (with correct default behavior) disables setting a UID for new pipelines from non-compiled plans. Existing pipelines can set table.exec.uid.generation=ALWAYS if the 1.15.0/1 behavior was acceptable due to a stable environment. See FLINK-28861 for more information.
-Release Notes # Bug [FLINK-23528] - stop-with-savepoint can fail with FlinkKinesisConsumer [FLINK-25097] - Bug in inner join when the filter condition is boolean type [FLINK-26931] - Pulsar sink&#39;s producer name should be unique [FLINK-27399] - Pulsar connector didn&#39;t set start consuming position correctly [FLINK-27570] - Checkpoint path error does not cause the job to stop [FLINK-27794] - The primary key obtained from MySQL is incorrect by using MysqlCatalog [FLINK-27856] - Adding pod template without spec crashes job manager [FLINK-28027] - Initialise Async Sink maximum number of in flight messages to low number for rate limiting strategy [FLINK-28057] - LD_PRELOAD is hardcoded to x64 on flink-docker [FLINK-28226] - &#39;Run kubernetes pyflink application test&#39; fails while pulling image [FLINK-28239] - Table-Planner-Loader lacks access to commons-math3 [FLINK-28240] - NettyShuffleMetricFactory#RequestedMemoryUsageMetric#getValue may throw ArithmeticException when the total segments of NetworkBufferPool is 0 [FLINK-28250] - exactly-once sink kafka cause out of memory [FLINK-28269] - Kubernetes test failed with permission denied [FLINK-28322] - DataStreamScanProvider&#39;s new method is not compatible [FLINK-28357] - Watermark issue when recovering Finished sources [FLINK-28404] - Annotation @InjectClusterClient does not work correctly with RestClusterClient [FLINK-28454] - Fix the wrong timestamp example of KafkaSource [FLINK-28577] - 1.15.1 web ui console report error about checkpoint size [FLINK-28602] - StateChangeFsUploader cannot close stream normally while enabling compression [FLINK-28817] - NullPointerException in HybridSource when restoring from checkpoint [FLINK-28835] - Savepoint and checkpoint capabilities and limitations table is incorrect [FLINK-28861] - Non-deterministic UID generation might cause issues during restore [FLINK-28880] - Fix CEP doc with wrong result of strict contiguity of looping patterns [FLINK-28908] - Coder for LIST 
type is incorrectly chosen is PyFlink [FLINK-28978] - Kinesis connector doesn&#39;t work for new AWS regions [FLINK-28994] - Enable withCredentials for Flink UI Improvement [FLINK-27199] - Bump Pulsar to 2.10.0 for fixing the unstable Pulsar test environment. [FLINK-27865] - Add guide and example for configuring SASL and SSL in Kafka SQL connector document [FLINK-28094] - Upgrade AWS SDK to support ap-southeast-3 [FLINK-28140] - Improve the documentation by adding Python examples [FLINK-28486] - [docs-zh] Flink FileSystem SQL Connector Doc is not right `}),e.add({id:96,href:"/2022/07/25/apache-flink-kubernetes-operator-1.1.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.1.0 Release Announcement",section:"Flink Blog",content:`The community has continued to work hard on improving the Flink Kubernetes Operator capabilities since our first production ready release we launched about two months ago.
+Release Notes # Bug [FLINK-23528] - stop-with-savepoint can fail with FlinkKinesisConsumer [FLINK-25097] - Bug in inner join when the filter condition is boolean type [FLINK-26931] - Pulsar sink&#39;s producer name should be unique [FLINK-27399] - Pulsar connector didn&#39;t set start consuming position correctly [FLINK-27570] - Checkpoint path error does not cause the job to stop [FLINK-27794] - The primary key obtained from MySQL is incorrect by using MysqlCatalog [FLINK-27856] - Adding pod template without spec crashes job manager [FLINK-28027] - Initialise Async Sink maximum number of in flight messages to low number for rate limiting strategy [FLINK-28057] - LD_PRELOAD is hardcoded to x64 on flink-docker [FLINK-28226] - &#39;Run kubernetes pyflink application test&#39; fails while pulling image [FLINK-28239] - Table-Planner-Loader lacks access to commons-math3 [FLINK-28240] - NettyShuffleMetricFactory#RequestedMemoryUsageMetric#getValue may throw ArithmeticException when the total segments of NetworkBufferPool is 0 [FLINK-28250] - exactly-once sink kafka cause out of memory [FLINK-28269] - Kubernetes test failed with permission denied [FLINK-28322] - DataStreamScanProvider&#39;s new method is not compatible [FLINK-28357] - Watermark issue when recovering Finished sources [FLINK-28404] - Annotation @InjectClusterClient does not work correctly with RestClusterClient [FLINK-28454] - Fix the wrong timestamp example of KafkaSource [FLINK-28577] - 1.15.1 web ui console report error about checkpoint size [FLINK-28602] - StateChangeFsUploader cannot close stream normally while enabling compression [FLINK-28817] - NullPointerException in HybridSource when restoring from checkpoint [FLINK-28835] - Savepoint and checkpoint capabilities and limitations table is incorrect [FLINK-28861] - Non-deterministic UID generation might cause issues during restore [FLINK-28880] - Fix CEP doc with wrong result of strict contiguity of looping patterns [FLINK-28908] - Coder for LIST 
type is incorrectly chosen is PyFlink [FLINK-28978] - Kinesis connector doesn&#39;t work for new AWS regions [FLINK-28994] - Enable withCredentials for Flink UI Improvement [FLINK-27199] - Bump Pulsar to 2.10.0 for fixing the unstable Pulsar test environment. [FLINK-27865] - Add guide and example for configuring SASL and SSL in Kafka SQL connector document [FLINK-28094] - Upgrade AWS SDK to support ap-southeast-3 [FLINK-28140] - Improve the documentation by adding Python examples [FLINK-28486] - [docs-zh] Flink FileSystem SQL Connector Doc is not right `}),e.add({id:97,href:"/2022/07/25/apache-flink-kubernetes-operator-1.1.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.1.0 Release Announcement",section:"Flink Blog",content:`The community has continued to work hard on improving the Flink Kubernetes Operator capabilities since our first production ready release we launched about two months ago.
 With the release of Flink Kubernetes Operator 1.1.0 we are proud to announce a number of exciting new features improving the overall experience of managing Flink resources and the operator itself in production environments.
 Release Highlights # A non-exhaustive list of some of the more exciting features added in the release:
 Kubernetes Events on application and job state changes New operator metrics Unified and more robust reconciliation flow Periodic savepoints Custom Flink Resource Listeners Dynamic watched namespaces New built-in examples For Flink SQL and PyFlink Experimental autoscaling support Kubernetes Events for Application and Job State Changes # The operator now emits native Kubernetes Events on relevant Flink Deployment and Job changes. This includes status changes, custom resource specification changes, deployment failures, etc.
@@ -1567,7 +1577,7 @@
 You can also find official Kubernetes Operator Docker images of the new version on Dockerhub.
 For more details, check the updated documentation and the release notes. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA.
 List of Contributors # Aitozi, Biao Geng, Chethan, ConradJam, Dora Marsal, Gyula Fora, Hao Xin, Hector Miuler Malpica Gallegos, Jaganathan Asokan, Jeesmon Jacob, Jim Busche, Maksim Aniskov, Marton Balassi, Matyas Orhidi, Nicholas Jiang, Peng Yuan, Peter Vary, Thomas Weise, Xin Hao, Yang Wang
-`}),e.add({id:97,href:"/2022/07/12/apache-flink-ml-2.1.0-release-announcement/",title:"Apache Flink ML 2.1.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink ML 2.1.0! This release focuses on improving Flink ML&rsquo;s infrastructure, such as Python SDK, memory management, and benchmark framework, to facilitate the development of performant, memory-safe, and easy-to-use algorithm libraries. We validated the enhanced infrastructure by implementing, benchmarking, and optimizing 10 new algorithms in Flink ML, and confirmed that Flink ML can meet or exceed the performance of selected algorithms from alternative popular ML libraries. In addition, this release added example Python and Java programs for each algorithm in the library to help users learn and use Flink ML.
+`}),e.add({id:98,href:"/2022/07/12/apache-flink-ml-2.1.0-release-announcement/",title:"Apache Flink ML 2.1.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink ML 2.1.0! This release focuses on improving Flink ML&rsquo;s infrastructure, such as Python SDK, memory management, and benchmark framework, to facilitate the development of performant, memory-safe, and easy-to-use algorithm libraries. We validated the enhanced infrastructure by implementing, benchmarking, and optimizing 10 new algorithms in Flink ML, and confirmed that Flink ML can meet or exceed the performance of selected algorithms from alternative popular ML libraries. In addition, this release added example Python and Java programs for each algorithm in the library to help users learn and use Flink ML.
 With the improvements and performance benchmarks made in this release, we believe Flink ML&rsquo;s infrastructure is ready for use by the interested developers in the community to build performant pythonic machine learning libraries.
 We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA! We hope you like the new release and we’d be eager to learn about your experience with it.
 Notable Features # API and Infrastructure # Supporting fine-grained per-operator memory management # Before this release, algorithm operators with internal states (e.g. the training data to be replayed for each round of iteration) store state data using the state-backend API (e.g. ListState). Such an operator either needs to store all data in memory, which risks OOM, or it needs to always store data on disk. In the latter case, it needs to read and de-serialize all data from disks repeatedly in each round of iteration even if the data can fit in RAM, leading to sub-optimal performance when the training data size is small. This makes it hard for developers to write performant and memory-safe operators.
@@ -1588,7 +1598,7 @@
 The source artifacts is now available on the updated Downloads page of the Flink website, and the most recent distribution of Flink ML Python package is available on PyPI.
 List of Contributors # The Apache Flink community would like to thank each one of the contributors that have made this release possible:
 Yunfeng Zhou, Zhipeng Zhang, huangxingbo, weibo, Dong Lin, Yun Gao, Jingsong Li and mumuhhh.
-`}),e.add({id:98,href:"/2022/07/11/flip-147-support-checkpoints-after-tasks-finished-part-one/",title:"FLIP-147: Support Checkpoints After Tasks Finished - Part One",section:"Flink Blog",content:` Motivation # Flink is a distributed processing engine for both unbounded and bounded streams of data. In recent versions, Flink has unified the DataStream API and the Table / SQL API to support both streaming and batch cases. Since most users require both types of data processing pipelines, the unification helps reduce the complexity of developing, operating, and maintaining consistency between streaming and batch backfilling jobs, like the case for Alibaba.
+`}),e.add({id:99,href:"/2022/07/11/flip-147-support-checkpoints-after-tasks-finished-part-one/",title:"FLIP-147: Support Checkpoints After Tasks Finished - Part One",section:"Flink Blog",content:` Motivation # Flink is a distributed processing engine for both unbounded and bounded streams of data. In recent versions, Flink has unified the DataStream API and the Table / SQL API to support both streaming and batch cases. Since most users require both types of data processing pipelines, the unification helps reduce the complexity of developing, operating, and maintaining consistency between streaming and batch backfilling jobs, like the case for Alibaba.
 Flink provides two execution modes under the unified programming API: the streaming mode and the batch mode. The streaming mode processes records incrementally based on the states, thus it supports both bounded and unbounded sources. The batch mode works with bounded sources and usually has a better performance for bounded jobs because it executes all the tasks in topological order and avoids random state access by pre-sorting the input records. Although batch mode is often the preferred mode to process bounded jobs, streaming mode is also required for various reasons. For example, users may want to deal with records containing retraction or exploit the property that data is roughly sorted by event times in streaming mode (like the case in Kappa+ Architecture). Moreover, users often have mixed jobs involving both unbounded streams and bounded side-inputs, which also require streaming execution mode.
 Figure 1. A comparison of the Streaming mode and Batch mode for the example Count operator. For streaming mode, the arrived elements are not sorted, the operator would read / write the state corresponding to the element for computation. For batch mode, the arrived elements are first sorted as a whole and then processed. In streaming mode, checkpointing is the vital mechanism in supporting exactly-once guarantees. By periodically snapshotting the aligned states of operators, Flink can recover from the latest checkpoint and continue execution when failover happens. However, previously Flink could not take checkpoints if any task gets finished. This would cause problems for jobs with both bounded and unbounded sources: if there are no checkpoints after the bounded part finished, the unbounded part might need to reprocess a large amount of records in case of a failure.
 Furthermore, being unable to take checkpoints with finished tasks is a problem for jobs using two-phase-commit sinks to achieve end-to-end exactly-once processing. The two-phase-commit sinks first write data to temporary files or external transactions, and commit the data only after a checkpoint completes to ensure the data would not be replayed on failure. However, if a job contains bounded sources, committing the results would not be possible after the bounded sources finish. Also because of that, for bounded jobs we have no way to commit the last piece of data after the first source task finished, and previously the bounded jobs just ignore the uncommitted data when finishing. These behaviors caused a lot of confusion and are always asked in the user mailing list.
@@ -1608,7 +1618,7 @@
 Based on this thought, as shown in the right part of Figure 3, to decoupled the process of &ldquo;finishing operator logic&rdquo; and &ldquo;finishing tasks&rdquo;, we introduced a new EndOfData event. For each task, after executing all the operator logic it would first notify the descendants with an EndOfData event so that the descendants also have chances to finish executing the operator logic. Then all the tasks could wait for the next checkpoint or the specified savepoint concurrently to commit all the remaining data before getting finished.
 At last, it is also worthy to mention we have clarified and renamed the close() and dispose() methods in the operators’ lifecycle. The two methods are in fact different since close() is only called when the task finishes normally and dispose() is called in both cases of normal finishing and failover. However, this was not clear from their names. Therefore, we rename the two methods to finish() and close():
 finish() marks the termination of the operator and no more records are allowed after finish() is called. It should only be called when sources are finished or when the -–drain parameter is specified. close() is used to do cleanup and release all the held resources. Conclusion # By supporting the checkpoints after tasks finished and revising the process of finishing, we can support checkpoints for jobs with both bounded and unbounded sources, and ensure the bounded job gets all output records committed before it finishes. The motivation behind this change is to ensure data consistency, results completeness, and failure recovery if there are bounded sources in the pipeline. The final checkpoint mechanism was first implemented in Flink 1.14 and enabled by default in Flink 1.15. If you have any questions, please feel free to start a discussion or report an issue in the dev or user mailing list.
-`}),e.add({id:99,href:"/2022/07/11/flip-147-support-checkpoints-after-tasks-finished-part-two/",title:"FLIP-147: Support Checkpoints After Tasks Finished - Part Two",section:"Flink Blog",content:`In the first part of this blog, we have briefly introduced the work to support checkpoints after tasks get finished and revised the process of finishing. In this part we will present more details on the implementation, including how we support checkpoints with finished tasks and the revised protocol of the finish process.
+`}),e.add({id:100,href:"/2022/07/11/flip-147-support-checkpoints-after-tasks-finished-part-two/",title:"FLIP-147: Support Checkpoints After Tasks Finished - Part Two",section:"Flink Blog",content:`In the first part of this blog, we have briefly introduced the work to support checkpoints after tasks get finished and revised the process of finishing. In this part we will present more details on the implementation, including how we support checkpoints with finished tasks and the revised protocol of the finish process.
 Implementation of support Checkpointing with Finished Tasks # As described in part one, to support checkpoints after some tasks are finished, the core idea is to mark the finished operators in checkpoints and skip executing these operators after recovery. To implement this idea, we enhanced the checkpointing procedure to generate the flag and use the flag on recovery. This section presents more details on the process of taking checkpoints with finished tasks and recovery from such checkpoints.
 Previously, checkpointing only worked when all tasks were running. As shown in the Figure 1, in this case the checkpoint coordinator first notify all the source tasks, and then the source tasks further notify the downstream tasks to take snapshots via barrier events. Similarly, if there are finished tasks, we need to find the new &ldquo;source&rdquo; tasks to initiate the checkpoint, namely those tasks that are still running but have no running precedent tasks. CheckpointCoordinator does the computation atomically at the JobManager side based on the latest states recorded in the execution graph.
 There might be race conditions when triggering tasks: when the checkpoint coordinator decides to trigger one task and starts emitting the RPC, it is possible that the task is just finished and reporting the FINISHED status to JobManager. In this case, the RPC message would fail and the checkpoint would be aborted.
@@ -1631,16 +1641,16 @@
 Task E is a bit different in that it has two inputs. Task A might continue to run for a while and, thus, Task E needs to wait until it receives an EndOfData event also from the other input before finishing operators and its final checkpoint might be different.
 On the other hand, when using stop-with-savepoint [--drain], the process is similar except that all the tasks need to wait for the exact savepoint before finishing instead of just any checkpoints. Moreover, since both Task C and Task A would finish at the same time, Task E would also be able to wait for this particular savepoint before finishing.
 Conclusion # In this part we have presented more details of how the checkpoints are taken with finished tasks and the revised process of finishing. We hope the details could provide more insights of the thoughts and implementations for this part of work. Still, if you have any questions, please feel free to start a discussion or report an issue in the dev or user mailing list.
-`}),e.add({id:100,href:"/2022/07/06/apache-flink-1.15.1-release-announcement/",title:"Apache Flink 1.15.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink 1.15 series.
+`}),e.add({id:101,href:"/2022/07/06/apache-flink-1.15.1-release-announcement/",title:"Apache Flink 1.15.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the first bug fix release of the Flink 1.15 series.
 This release includes 62 bug fixes, vulnerability fixes, and minor improvements for Flink 1.15. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
 We highly recommend all users upgrade to Flink 1.15.1.
 Release Artifacts # Maven Dependencies # &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.15.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java&lt;/artifactId&gt; &lt;version&gt;1.15.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients&lt;/artifactId&gt; &lt;version&gt;1.15.1&lt;/version&gt; &lt;/dependency&gt; Binaries # You can find the binaries on the updated Downloads page.
 Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.15.1 Release Notes # The community is aware of 3 issues that were introduced with 1.15.0 that remain unresolved. Efforts are underway to fix these issues for Flink 1.15.2:
-[FLINK-28861] - Non-deterministic UID generation might cause issues during restore for Table/SQL API [FLINK-28060] - Kafka commit on checkpointing fails repeatedly after a broker restart [FLINK-28322] - DataStreamScanProvider's new method is not compatible Bug [FLINK-22984] - UnsupportedOperationException when using Python UDF to generate watermark [FLINK-24491] - ExecutionGraphInfo may not be archived when the dispatcher terminates [FLINK-24735] - SQL client crashes with \`Cannot add expression of different type to set\` [FLINK-26645] - Pulsar Source subscribe to a single topic partition will consume all partitions from that topic [FLINK-27041] - KafkaSource in batch mode failing if any topic partition is empty [FLINK-27140] - Move JobResultStore dirty entry creation into ioExecutor [FLINK-27174] - Non-null check for bootstrapServers field is incorrect in KafkaSink [FLINK-27218] - Serializer in OperatorState has not been updated when new Serializers are NOT incompatible [FLINK-27223] - State access doesn&#39;t work as expected when cache size is set to 0 [FLINK-27247] - ScalarOperatorGens.numericCasting is not compatible with legacy behavior [FLINK-27255] - Flink-avro does not support serialization and deserialization of avro schema longer than 65535 characters [FLINK-27282] - Fix the bug of wrong positions mapping in RowCoder [FLINK-27367] - SQL CAST between INT and DATE is broken [FLINK-27368] - SQL CAST(&#39; 1 &#39; as BIGINT) returns wrong result [FLINK-27409] - Cleanup stale slot allocation record when the resource requirement of a job is empty [FLINK-27418] - Flink SQL TopN result is wrong [FLINK-27420] - Suspended SlotManager fails to re-register metrics when started again [FLINK-27465] - AvroRowDeserializationSchema.convertToTimestamp fails with negative nano seconds [FLINK-27487] - KafkaMetricWrappers do incorrect cast [FLINK-27545] - Update examples in PyFlink shell [FLINK-27563] - Resource Providers - Yarn doc page has minor display error 
[FLINK-27606] - CompileException when using UDAF with merge() method [FLINK-27676] - Output records from on_timer are behind the triggering watermark in PyFlink [FLINK-27683] - Insert into (column1, column2) Values(.....) fails with SQL hints [FLINK-27711] - Correct the typo of set_topics_pattern by changing it to set_topic_pattern for Pulsar Connector [FLINK-27733] - Rework on_timer output behind watermark bug fix [FLINK-27734] - Not showing checkpoint interval properly in WebUI when checkpoint is disabled [FLINK-27760] - NPE is thrown when executing PyFlink jobs in batch mode [FLINK-27762] - Kafka WakeupException during handling splits changes [FLINK-27797] - PythonTableUtils.getCollectionInputFormat cannot correctly handle None values [FLINK-27848] - ZooKeeperLeaderElectionDriver keeps writing leader information, using up zxid [FLINK-27881] - The key(String) in PulsarMessageBuilder returns null [FLINK-27890] - SideOutputExample.java fails [FLINK-27910] - FileSink not enforcing rolling policy if started from scratch [FLINK-27933] - Savepoint status cannot be queried from standby jobmanager [FLINK-27955] - PyFlink installation failure on Windows OS [FLINK-27999] - NoSuchMethodError when using Hive 3 dialect [FLINK-28018] - the start index to create empty splits in BinaryInputFormat#createInputSplits is inappropriate [FLINK-28019] - Error in RetractableTopNFunction when retracting a stale record with state ttl enabled [FLINK-28114] - The path of the Python client interpreter could not point to an archive file in distributed file system Improvement [FLINK-24586] - SQL functions should return STRING instead of VARCHAR(2000) [FLINK-26788] - AbstractDeserializationSchema should add cause when throwing a FlinkRuntimeException [FLINK-26909] - Allow setting parallelism to -1 from CLI [FLINK-27064] - Centralize ArchUnit rules for production code [FLINK-27480] - KafkaSources sharing the groupId might lead to InstanceAlreadyExistException warning [FLINK-27534] - Apply 
scalafmt to 1.15 branch [FLINK-27776] - Throw exception when UDAF used in sliding window does not implement merge method in PyFlink [FLINK-27935] - Add Pyflink example of create temporary view document Technical Debt [FLINK-25694] - Upgrade Presto to resolve GSON/Alluxio Vulnerability Sub-task [FLINK-26052] - Update chinese documentation regarding FLIP-203 [FLINK-26588] - Translate the new SQL CAST documentation to Chinese [FLINK-27382] - Make Job mode wait with cluster shutdown until the cleanup is done `}),e.add({id:101,href:"/2022/06/22/apache-flink-1.14.5-release-announcement/",title:"Apache Flink 1.14.5 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce another bug fix release for Flink 1.14.
+[FLINK-28861] - Non-deterministic UID generation might cause issues during restore for Table/SQL API [FLINK-28060] - Kafka commit on checkpointing fails repeatedly after a broker restart [FLINK-28322] - DataStreamScanProvider's new method is not compatible Bug [FLINK-22984] - UnsupportedOperationException when using Python UDF to generate watermark [FLINK-24491] - ExecutionGraphInfo may not be archived when the dispatcher terminates [FLINK-24735] - SQL client crashes with \`Cannot add expression of different type to set\` [FLINK-26645] - Pulsar Source subscribe to a single topic partition will consume all partitions from that topic [FLINK-27041] - KafkaSource in batch mode failing if any topic partition is empty [FLINK-27140] - Move JobResultStore dirty entry creation into ioExecutor [FLINK-27174] - Non-null check for bootstrapServers field is incorrect in KafkaSink [FLINK-27218] - Serializer in OperatorState has not been updated when new Serializers are NOT incompatible [FLINK-27223] - State access doesn&#39;t work as expected when cache size is set to 0 [FLINK-27247] - ScalarOperatorGens.numericCasting is not compatible with legacy behavior [FLINK-27255] - Flink-avro does not support serialization and deserialization of avro schema longer than 65535 characters [FLINK-27282] - Fix the bug of wrong positions mapping in RowCoder [FLINK-27367] - SQL CAST between INT and DATE is broken [FLINK-27368] - SQL CAST(&#39; 1 &#39; as BIGINT) returns wrong result [FLINK-27409] - Cleanup stale slot allocation record when the resource requirement of a job is empty [FLINK-27418] - Flink SQL TopN result is wrong [FLINK-27420] - Suspended SlotManager fails to re-register metrics when started again [FLINK-27465] - AvroRowDeserializationSchema.convertToTimestamp fails with negative nano seconds [FLINK-27487] - KafkaMetricWrappers do incorrect cast [FLINK-27545] - Update examples in PyFlink shell [FLINK-27563] - Resource Providers - Yarn doc page has minor display error 
[FLINK-27606] - CompileException when using UDAF with merge() method [FLINK-27676] - Output records from on_timer are behind the triggering watermark in PyFlink [FLINK-27683] - Insert into (column1, column2) Values(.....) fails with SQL hints [FLINK-27711] - Correct the typo of set_topics_pattern by changing it to set_topic_pattern for Pulsar Connector [FLINK-27733] - Rework on_timer output behind watermark bug fix [FLINK-27734] - Not showing checkpoint interval properly in WebUI when checkpoint is disabled [FLINK-27760] - NPE is thrown when executing PyFlink jobs in batch mode [FLINK-27762] - Kafka WakeupException during handling splits changes [FLINK-27797] - PythonTableUtils.getCollectionInputFormat cannot correctly handle None values [FLINK-27848] - ZooKeeperLeaderElectionDriver keeps writing leader information, using up zxid [FLINK-27881] - The key(String) in PulsarMessageBuilder returns null [FLINK-27890] - SideOutputExample.java fails [FLINK-27910] - FileSink not enforcing rolling policy if started from scratch [FLINK-27933] - Savepoint status cannot be queried from standby jobmanager [FLINK-27955] - PyFlink installation failure on Windows OS [FLINK-27999] - NoSuchMethodError when using Hive 3 dialect [FLINK-28018] - the start index to create empty splits in BinaryInputFormat#createInputSplits is inappropriate [FLINK-28019] - Error in RetractableTopNFunction when retracting a stale record with state ttl enabled [FLINK-28114] - The path of the Python client interpreter could not point to an archive file in distributed file system Improvement [FLINK-24586] - SQL functions should return STRING instead of VARCHAR(2000) [FLINK-26788] - AbstractDeserializationSchema should add cause when throwing a FlinkRuntimeException [FLINK-26909] - Allow setting parallelism to -1 from CLI [FLINK-27064] - Centralize ArchUnit rules for production code [FLINK-27480] - KafkaSources sharing the groupId might lead to InstanceAlreadyExistException warning [FLINK-27534] - Apply 
scalafmt to 1.15 branch [FLINK-27776] - Throw exception when UDAF used in sliding window does not implement merge method in PyFlink [FLINK-27935] - Add Pyflink example of create temporary view document Technical Debt [FLINK-25694] - Upgrade Presto to resolve GSON/Alluxio Vulnerability Sub-task [FLINK-26052] - Update chinese documentation regarding FLIP-203 [FLINK-26588] - Translate the new SQL CAST documentation to Chinese [FLINK-27382] - Make Job mode wait with cluster shutdown until the cleanup is done `}),e.add({id:102,href:"/2022/06/22/apache-flink-1.14.5-release-announcement/",title:"Apache Flink 1.14.5 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce another bug fix release for Flink 1.14.
 This release includes 67 bugs, vulnerability fixes and minor improvements for Flink 1.14. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
 We highly recommend all users to upgrade to Flink 1.14.5.
 Release Artifacts # Maven Dependencies # &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.14.5&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.14.5&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.14.5&lt;/version&gt; &lt;/dependency&gt; Binaries # You can find the binaries on the updated Downloads page.
-Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.14.5 Release Notes # Sub-task [FLINK-25800] - Update wrong links in the datastream/execution_mode.md page. Bug [FLINK-22984] - UnsupportedOperationException when using Python UDF to generate watermark [FLINK-24491] - ExecutionGraphInfo may not be archived when the dispatcher terminates [FLINK-25227] - Comparing the equality of the same (boxed) numeric values returns false [FLINK-25440] - Apache Pulsar Connector Document description error about 'Starting Position'. [FLINK-25904] - NullArgumentException when accessing checkpoint stats on standby JobManager [FLINK-26016] - FileSystemLookupFunction does not produce correct results when hive table uses columnar storage [FLINK-26018] - Unnecessary late events when using the new KafkaSource [FLINK-26049] - The tolerable-failed-checkpoints logic is invalid when checkpoint trigger failed [FLINK-26285] - ZooKeeperStateHandleStore does not handle not existing nodes properly in getAllAndLock [FLINK-26334] - When timestamp - offset + windowSize < 0, elements cannot be assigned to the correct window [FLINK-26381] - Wrong document order of Chinese version [FLINK-26395] - The description of RAND_INTEGER is wrong in SQL function documents [FLINK-26504] - Fix the incorrect type error in unbounded Python UDAF [FLINK-26536] - PyFlink RemoteKeyedStateBackend#merge_namespaces bug [FLINK-26543] - Fix the issue that exceptions generated in startup are missed in Python loopback mode [FLINK-26550] - Correct the information of checkpoint failure [FLINK-26607] - There are multiple MAX_LONG_VALUE value errors in pyflink code [FLINK-26629] - Error in code comment for SubtaskStateMapper.RANGE [FLINK-26645] - Pulsar Source subscribe to a single topic partition will consume all partitions from that topic [FLINK-26708] - TimestampsAndWatermarksOperator should not propagate WatermarkStatus [FLINK-26738] - Default value of StateDescriptor is valid 
when enable state ttl config [FLINK-26775] - PyFlink WindowOperator#process_element register wrong cleanup timer [FLINK-26846] - Gauge metrics doesn't work in PyFlink [FLINK-26855] - ImportError: cannot import name 'environmentfilter' from 'jinja2' [FLINK-26920] - Job executes failed with "The configured managed memory fraction for Python worker process must be within (0, 1], was: %s." [FLINK-27108] - State cache clean up doesn't work as expected [FLINK-27174] - Non-null check for bootstrapServers field is incorrect in KafkaSink [FLINK-27223] - State access doesn't work as expected when cache size is set to 0 [FLINK-27255] - Flink-avro does not support serialization and deserialization of avro schema longer than 65535 characters [FLINK-27315] - Fix the demo of MemoryStateBackendMigration [FLINK-27409] - Cleanup stale slot allocation record when the resource requirement of a job is empty [FLINK-27442] - Module flink-sql-avro-confluent-registry does not configure Confluent repo [FLINK-27545] - Update examples in PyFlink shell [FLINK-27676] - Output records from on_timer are behind the triggering watermark in PyFlink [FLINK-27733] - Rework on_timer output behind watermark bug fix [FLINK-27751] - Dependency resolution from repository.jboss.org fails on CI [FLINK-27760] - NPE is thrown when executing PyFlink jobs in batch mode New Feature [FLINK-26382] - Add Chinese documents for flink-training exercises Improvement [FLINK-5151] - Add discussion about object mutations to heap-based state backend docs. 
[FLINK-23843] - Exceptions during "SplitEnumeratorContext.runInCoordinatorThread()" should cause Global Failure instead of Process Kill [FLINK-24274] - Wrong parameter order in documentation of State Processor API [FLINK-24384] - Count checkpoints failed in trigger phase into numberOfFailedCheckpoints [FLINK-26130] - Document why and when user would like to increase network buffer size [FLINK-26575] - Improve the info message when restoring keyed state backend [FLINK-26650] - Avoid to print stack trace for checkpoint trigger failure if not all tasks are started [FLINK-26788] - AbstractDeserializationSchema should add cause when thow a FlinkRuntimeException [FLINK-27088] - The example of using StringDeserializer for deserializing Kafka message value as string has an error [FLINK-27480] - KafkaSources sharing the groupId might lead to InstanceAlreadyExistException warning [FLINK-27776] - Throws exception when udaf used in sliding window does not implement merge method in PyFlink Technical Debt [FLINK-25694] - Upgrade Presto to resolve GSON/Alluxio Vulnerability [FLINK-26352] - Missing license header in WebUI source files [FLINK-26961] - Update multiple Jackson dependencies to v2.13.2 and v2.13.2.1 `}),e.add({id:102,href:"/2022/06/17/adaptive-batch-scheduler-automatically-decide-parallelism-of-flink-batch-jobs/",title:"Adaptive Batch Scheduler: Automatically Decide Parallelism of Flink Batch Jobs",section:"Flink Blog",content:` Introduction # Deciding proper parallelisms of operators is not an easy work for many users. For batch jobs, a small parallelism may result in long execution time and big failover regression. While an unnecessary large parallelism may result in resource waste and more overhead cost in task deployment and network shuffling.
+Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.14.5 Release Notes # Sub-task [FLINK-25800] - Update wrong links in the datastream/execution_mode.md page. Bug [FLINK-22984] - UnsupportedOperationException when using Python UDF to generate watermark [FLINK-24491] - ExecutionGraphInfo may not be archived when the dispatcher terminates [FLINK-25227] - Comparing the equality of the same (boxed) numeric values returns false [FLINK-25440] - Apache Pulsar Connector Document description error about 'Starting Position'. [FLINK-25904] - NullArgumentException when accessing checkpoint stats on standby JobManager [FLINK-26016] - FileSystemLookupFunction does not produce correct results when hive table uses columnar storage [FLINK-26018] - Unnecessary late events when using the new KafkaSource [FLINK-26049] - The tolerable-failed-checkpoints logic is invalid when checkpoint trigger failed [FLINK-26285] - ZooKeeperStateHandleStore does not handle not existing nodes properly in getAllAndLock [FLINK-26334] - When timestamp - offset + windowSize < 0, elements cannot be assigned to the correct window [FLINK-26381] - Wrong document order of Chinese version [FLINK-26395] - The description of RAND_INTEGER is wrong in SQL function documents [FLINK-26504] - Fix the incorrect type error in unbounded Python UDAF [FLINK-26536] - PyFlink RemoteKeyedStateBackend#merge_namespaces bug [FLINK-26543] - Fix the issue that exceptions generated in startup are missed in Python loopback mode [FLINK-26550] - Correct the information of checkpoint failure [FLINK-26607] - There are multiple MAX_LONG_VALUE value errors in pyflink code [FLINK-26629] - Error in code comment for SubtaskStateMapper.RANGE [FLINK-26645] - Pulsar Source subscribe to a single topic partition will consume all partitions from that topic [FLINK-26708] - TimestampsAndWatermarksOperator should not propagate WatermarkStatus [FLINK-26738] - Default value of StateDescriptor is valid 
when enable state ttl config [FLINK-26775] - PyFlink WindowOperator#process_element register wrong cleanup timer [FLINK-26846] - Gauge metrics doesn't work in PyFlink [FLINK-26855] - ImportError: cannot import name 'environmentfilter' from 'jinja2' [FLINK-26920] - Job executes failed with "The configured managed memory fraction for Python worker process must be within (0, 1], was: %s." [FLINK-27108] - State cache clean up doesn't work as expected [FLINK-27174] - Non-null check for bootstrapServers field is incorrect in KafkaSink [FLINK-27223] - State access doesn't work as expected when cache size is set to 0 [FLINK-27255] - Flink-avro does not support serialization and deserialization of avro schema longer than 65535 characters [FLINK-27315] - Fix the demo of MemoryStateBackendMigration [FLINK-27409] - Cleanup stale slot allocation record when the resource requirement of a job is empty [FLINK-27442] - Module flink-sql-avro-confluent-registry does not configure Confluent repo [FLINK-27545] - Update examples in PyFlink shell [FLINK-27676] - Output records from on_timer are behind the triggering watermark in PyFlink [FLINK-27733] - Rework on_timer output behind watermark bug fix [FLINK-27751] - Dependency resolution from repository.jboss.org fails on CI [FLINK-27760] - NPE is thrown when executing PyFlink jobs in batch mode New Feature [FLINK-26382] - Add Chinese documents for flink-training exercises Improvement [FLINK-5151] - Add discussion about object mutations to heap-based state backend docs. 
[FLINK-23843] - Exceptions during "SplitEnumeratorContext.runInCoordinatorThread()" should cause Global Failure instead of Process Kill [FLINK-24274] - Wrong parameter order in documentation of State Processor API [FLINK-24384] - Count checkpoints failed in trigger phase into numberOfFailedCheckpoints [FLINK-26130] - Document why and when user would like to increase network buffer size [FLINK-26575] - Improve the info message when restoring keyed state backend [FLINK-26650] - Avoid to print stack trace for checkpoint trigger failure if not all tasks are started [FLINK-26788] - AbstractDeserializationSchema should add cause when thow a FlinkRuntimeException [FLINK-27088] - The example of using StringDeserializer for deserializing Kafka message value as string has an error [FLINK-27480] - KafkaSources sharing the groupId might lead to InstanceAlreadyExistException warning [FLINK-27776] - Throws exception when udaf used in sliding window does not implement merge method in PyFlink Technical Debt [FLINK-25694] - Upgrade Presto to resolve GSON/Alluxio Vulnerability [FLINK-26352] - Missing license header in WebUI source files [FLINK-26961] - Update multiple Jackson dependencies to v2.13.2 and v2.13.2.1 `}),e.add({id:103,href:"/2022/06/17/adaptive-batch-scheduler-automatically-decide-parallelism-of-flink-batch-jobs/",title:"Adaptive Batch Scheduler: Automatically Decide Parallelism of Flink Batch Jobs",section:"Flink Blog",content:` Introduction # Deciding proper parallelisms of operators is not an easy work for many users. For batch jobs, a small parallelism may result in long execution time and big failover regression. While an unnecessary large parallelism may result in resource waste and more overhead cost in task deployment and network shuffling.
 To decide a proper parallelism, one needs to know how much data each operator needs to process. However, It can be hard to predict data volume to be processed by a job because it can be different everyday. And it can be harder or even impossible (due to complex operators or UDFs) to predict data volume to be processed by each operator.
 To solve this problem, we introduced the adaptive batch scheduler in Flink 1.15. The adaptive batch scheduler can automatically decide parallelism of an operator according to the size of its consumed datasets. Here are the benefits the adaptive batch scheduler can bring:
 Batch job users can be relieved from parallelism tuning. Parallelism tuning is fine grained considering different operators. This is particularly beneficial for SQL jobs which can only be set with a global parallelism previously. Parallelism tuning can better fit consumed datasets which have a varying volume size every day. Get Started # To automatically decide parallelism of operators, you need to:
@@ -1675,7 +1685,7 @@
 The scheduler will try to decide the parallelism of all job vertices before handling each scheduling event, and the parallelism decision will be made for each job vertex in topological order:
 For source vertices, the parallelism should have been decided before starting scheduling. For non-source vertices, the parallelism can be decided only when all its consumed results are fully produced. After deciding the parallelism, the scheduler will try to initialize the job vertices in topological order. A job vertex that can be initialized should meet the following conditions:
 The parallelism of the job vertex has been decided and the job vertex has not been initialized yet. All upstream job vertices have been initialized. Future improvement # Auto-rebalancing of workloads # When running batch jobs, data skew may occur (a task needs to process much larger data than other tasks), which leads to long-tail tasks and further slows down the finish of jobs. Users usually hope that the system can automatically solve this problem. One typical data skew case is that some subpartitions have a significantly larger amount of data than others. This case can be solved by finer grained subpartitions and auto-rebalancing of workload. The work of the adaptive batch scheduler can be considered as the first step towards it, because the requirements of auto-rebalancing are similar to adaptive batch scheduler, they both need the support of dynamic graphs and the collection of result partitions size. Based on the implementation of adaptive batch scheduler, we can solve the above problem by increasing max parallelism (for finer grained subpartitions) and simply changing the subpartition range division algorithm (for auto-rebalancing). In the current design, the subpartition range is divided according to the number of subpartitions, we can change it to divide according to the amount of data in subpartitions, so that the amount of data within each subpartition range can be approximately the same. In this way, workloads of downstream tasks can be balanced.
-Fig. 5 - Auto-rebalance with finer grained subpartitions `}),e.add({id:103,href:"/2022/06/05/apache-flink-kubernetes-operator-1.0.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.0.0 Release Announcement",section:"Flink Blog",content:`In the last two months since our initial preview release the community has been hard at work to stabilize and improve the core Flink Kubernetes Operator logic. We are now proud to announce the first production ready release of the operator project.
+Fig. 5 - Auto-rebalance with finer grained subpartitions `}),e.add({id:104,href:"/2022/06/05/apache-flink-kubernetes-operator-1.0.0-release-announcement/",title:"Apache Flink Kubernetes Operator 1.0.0 Release Announcement",section:"Flink Blog",content:`In the last two months since our initial preview release the community has been hard at work to stabilize and improve the core Flink Kubernetes Operator logic. We are now proud to announce the first production ready release of the operator project.
 Release Highlights # The Flink Kubernetes Operator 1.0.0 version brings numerous improvements and new features to almost every aspect of the operator.
 New v1beta1 API version &amp; compatibility guarantees Session Job Management support Support for Flink 1.13, 1.14 and 1.15 Deployment recovery and rollback New Operator metrics Improved configuration management Custom validators Savepoint history and cleanup New API version and compatibility guarantees # The 1.0.0 release brings a new API version: v1beta1.
 Don’t let the name confuse you, we consider v1beta1 the first production ready API release, and we will maintain backward compatibility for your applications going forward.
@@ -1698,7 +1708,7 @@
 For more details, check the updated documentation and the release notes. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA.
 List of Contributors # The Apache Flink community would like to thank each and every one of the contributors that have made this release possible:
 Aitozi, Biao Geng, ConradJam, Fuyao Li, Gyula Fora, Jaganathan Asokan, James Busche, liuzhuo, Márton Balassi, Matyas Orhidi, Nicholas Jiang, Ted Chang, Thomas Weise, Xin Hao, Yang Wang, Zili Chen
-`}),e.add({id:104,href:"/2022/05/30/improving-speed-and-stability-of-checkpointing-with-generic-log-based-incremental-checkpoints/",title:"Improving speed and stability of checkpointing with generic log-based incremental checkpoints",section:"Flink Blog",content:` Introduction # One of the most important characteristics of stream processing systems is end-to-end latency, i.e. the time it takes for the results of processing an input record to reach the outputs. In the case of Flink, end-to-end latency mostly depends on the checkpointing mechanism, because processing results should only become visible after the state of the stream is persisted to non-volatile storage (this is assuming exactly-once mode; in other modes, results can be published immediately).
+`}),e.add({id:105,href:"/2022/05/30/improving-speed-and-stability-of-checkpointing-with-generic-log-based-incremental-checkpoints/",title:"Improving speed and stability of checkpointing with generic log-based incremental checkpoints",section:"Flink Blog",content:` Introduction # One of the most important characteristics of stream processing systems is end-to-end latency, i.e. the time it takes for the results of processing an input record to reach the outputs. In the case of Flink, end-to-end latency mostly depends on the checkpointing mechanism, because processing results should only become visible after the state of the stream is persisted to non-volatile storage (this is assuming exactly-once mode; in other modes, results can be published immediately).
 Furthermore, сheckpoint duration also defines the reasonable interval with which checkpoints are made. A shorter interval provides the following advantages:
 Lower latency for transactional sinks: Transactional sinks commit on checkpoints, so faster checkpoints mean more frequent commits. More predictable checkpoint intervals: Currently, the duration of a checkpoint depends on the size of the artifacts that need to be persisted in the checkpoint storage. Less work on recovery. The more frequently the checkpoint, the fewer events need to be re-processed after recovery. Following are the main factors affecting checkpoint duration in Flink:
 Barrier travel time and alignment duration Time to take state snapshot and persist it onto the durable highly-available storage (such as S3) Recent improvements such as Unaligned checkpoints and Buffer debloating try to address (1), especially in the presence of back-pressure. Previously, Incremental checkpoints were introduced to reduce the size of a snapshot, thereby reducing the time required to store it (2).
@@ -1742,7 +1752,7 @@
 We encourage you to try out this feature and assess the pros and cons of using it in your setup. The simplest way to do this it is to add the following to your flink-conf.yaml:
 state.backend.changelog.enabled: true state.backend.changelog.storage: filesystem dstl.dfs.base-path: &lt;location similar to state.checkpoints.dir&gt; Please see the full documentation here.
 Acknowledgments # We thank Stephan Ewen for the initial idea of the project, and many other engineers including Piotr Nowojski, Yu Li and Yun Tang for design discussions and code reviews.
-References # FLIP-158 generic log-based incremental checkpoints documentation Unaligned checkpoints Buffer debloating Incremental checkpoints `}),e.add({id:105,href:"/2022/05/23/getting-into-low-latency-gears-with-apache-flink-part-two/",title:"Getting into Low-Latency Gears with Apache Flink - Part Two",section:"Flink Blog",content:`This series of blog posts present a collection of low-latency techniques in Flink. In part one, we discussed the types of latency in Flink and the way we measure end-to-end latency and presented a few techniques that optimize latency directly. In this post, we will continue with a few more direct latency optimization techniques. Just like in part one, for each optimization technique, we will clarify what it is, when to use it, and what to keep in mind when using it. We will also show experimental results to support our statements.
+References # FLIP-158 generic log-based incremental checkpoints documentation Unaligned checkpoints Buffer debloating Incremental checkpoints `}),e.add({id:106,href:"/2022/05/23/getting-into-low-latency-gears-with-apache-flink-part-two/",title:"Getting into Low-Latency Gears with Apache Flink - Part Two",section:"Flink Blog",content:`This series of blog posts present a collection of low-latency techniques in Flink. In part one, we discussed the types of latency in Flink and the way we measure end-to-end latency and presented a few techniques that optimize latency directly. In this post, we will continue with a few more direct latency optimization techniques. Just like in part one, for each optimization technique, we will clarify what it is, when to use it, and what to keep in mind when using it. We will also show experimental results to support our statements.
 Direct latency optimization # Spread work across time # When you use timers or do windowing in a job, timer or window firing may create load spikes due to heavy computation or state access. If the allocated resources cannot cope with these load spikes, timer or window firing will take a long time to finish. This often results in high latency.
 To avoid this situation, you should change your code to spread out the workload as much as possible such that you do not accumulate too much work to be done at a single point in time. In the case of windowing, you should consider using incremental window aggregation with AggregateFunction or ReduceFunction. In the case of timers in a ProcessFunction, the operations executed in the onTimer() method should be optimized such that the time spent there is reduced to a minimum. If you see latency spikes resulting from a global aggregation or if you need to collect events in a well-defined order to perform certain computations, you can consider adding a pre-aggregation phase in front of the current operator.
 You can apply this optimization if you are using timer-based processing (e.g., timers, windowing) and an efficient aggregation can be applied whenever an event arrives instead of waiting for timers to fire.
@@ -1763,7 +1773,7 @@
 You can apply this optimization if your job has a sub-second level latency requirement (e.g., hundreds of milliseconds) and the reduced watermarking interval still contributes a significant part of the latency.
 Keep in mind that this may change your job logic considerably since you have to deal with out-of-order events by yourself.
 Summary # Following part one, this blog post presented a few more latency optimization techniques with a focus on direct latency optimization. In the next part, we will focus on techniques that optimize latency by increasing throughput. Stay tuned!
-`}),e.add({id:106,href:"/2022/05/18/getting-into-low-latency-gears-with-apache-flink-part-one/",title:"Getting into Low-Latency Gears with Apache Flink - Part One",section:"Flink Blog",content:`Apache Flink is a stream processing framework well known for its low latency processing capabilities. It is generic and suitable for a wide range of use cases. As a Flink application developer or a cluster administrator, you need to find the right gear that is best for your application. In other words, you don&rsquo;t want to be driving a luxury sports car while only using the first gear.
+`}),e.add({id:107,href:"/2022/05/18/getting-into-low-latency-gears-with-apache-flink-part-one/",title:"Getting into Low-Latency Gears with Apache Flink - Part One",section:"Flink Blog",content:`Apache Flink is a stream processing framework well known for its low latency processing capabilities. It is generic and suitable for a wide range of use cases. As a Flink application developer or a cluster administrator, you need to find the right gear that is best for your application. In other words, you don&rsquo;t want to be driving a luxury sports car while only using the first gear.
 In this multi-part series, we will present a collection of low-latency techniques in Flink. Part one starts with types of latency in Flink and the way we measure the end-to-end latency, followed by a few techniques that optimize latency directly. Part two continues with a few more direct latency optimization techniques. Further parts of this series will cover techniques that improve latencies by optimizing throughput. For each optimization technique, we will clarify what it is, when to use it, and what to keep in mind when using it. We will also show experimental results to support our statements.
 This series of blog posts is a write-up of our talk in Flink Forward Global 2021 and includes additional latency optimization techniques and details.
 Latency # Types of latency # Latency can refer to different things. LatencyMarkers in Flink measure the time it takes for the markers to travel from each source operator to each downstream operator. As LatencyMarkers bypass user functions in operators, the measured latencies do not reflect the entire end-to-end latency but only a part of it. Flink also supports tracking the state access latency, which measures the response latency when state is read/written. One can also manually measure the time taken by some operators, or get this data with profilers. However, what users usually care about is the end-to-end latency, including the time spent in user-defined functions, in the stream processing framework, and when state is accessed. End-to-end latency is what we will focus on.
@@ -1788,7 +1798,7 @@
 Keep in mind that network buffer timeout that is too low may reduce throughput.
 As seen in the following experiment results, by using execution.buffer-timeout: 10 ms in WindowingJob, we again reduced the latency (now to 370ms).
 Summary # In part one of this multi-part series, we discussed types of latency in Flink and the way we measure end-to-end latency. Then we presented a few latency optimization techniques with a focus on direct latency optimization. For each technique, we explained what it is, when to use it, and what to keep in mind when using it. Part two will continue with a few more direct latency optimization techniques. Stay tuned!
-`}),e.add({id:107,href:"/2022/05/11/apache-flink-table-store-0.1.0-release-announcement/",title:"Apache Flink Table Store 0.1.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is pleased to announce the preview release of the Apache Flink Table Store (0.1.0).
+`}),e.add({id:108,href:"/2022/05/11/apache-flink-table-store-0.1.0-release-announcement/",title:"Apache Flink Table Store 0.1.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is pleased to announce the preview release of the Apache Flink Table Store (0.1.0).
 Please check out the full documentation for detailed information and user guides.
 Note: Flink Table Store is still in beta status and undergoing rapid development. We do not recommend that you use it directly in a production environment.
 What is Flink Table Store # In the past years, thanks to our numerous contributors and users, Apache Flink has established itself as one of the best distributed computing engines, especially for stateful stream processing at large scale. However, there are still a few challenges people are facing when they try to obtain insights from their data in real-time. Among these challenges, one prominent problem is lack of storage that caters to all the computing patterns.
@@ -1806,7 +1816,7 @@
 We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA.
 List of Contributors # The Apache Flink community would like to thank every one of the contributors that have made this release possible:
 Jane Chan, Jiangjie (Becket) Qin, Jingsong Lee, Leonard Xu, Nicholas Jiang, Shen Zhu, tsreaper, Yubin Li
-`}),e.add({id:108,href:"/2022/05/06/exploring-the-thread-mode-in-pyflink/",title:"Exploring the thread mode in PyFlink",section:"Flink Blog",content:`PyFlink was introduced in Flink 1.9 which purpose is to bring the power of Flink to Python users and allow Python users to develop Flink jobs in Python language. The functionality becomes more and more mature through the development in the past releases.
+`}),e.add({id:109,href:"/2022/05/06/exploring-the-thread-mode-in-pyflink/",title:"Exploring the thread mode in PyFlink",section:"Flink Blog",content:`PyFlink was introduced in Flink 1.9 which purpose is to bring the power of Flink to Python users and allow Python users to develop Flink jobs in Python language. The functionality becomes more and more mature through the development in the past releases.
 Before Flink 1.15, Python user-defined functions will be executed in separate Python processes (based on the Apache Beam Portability Framework). It will bring additional serialization/deserialization overhead and also communication overhead. In scenarios where the data size is big, e.g. image processing, etc, this overhead becomes non-negligible. Besides, since it involves inter-process communication, the processing latency is also non-negligible, which is unacceptable in scenarios where the latency is critical, e.g. quantitative transaction, etc.
 In Flink 1.15, we have introduced a new execution mode named &rsquo;thread&rsquo; mode (based on PEMJA) where the Python user-defined functions will be executed in the JVM as a thread instead of a separate Python process. In this article, we will dig into the details about this execution mode and also share some benchmark data to give users a basic understanding of how it works and which scenarios it’s applicable for.
 Process Mode # Fig. 1 - PyFlink Architecture Overview From Fig. 1, we can see the architecture of PyFlink. As shown on the left side of Fig.1, users could use PyFlink API(Python Table API &amp; SQL or Python DataStream API) to declare the logic of jobs, which will be finally translated into JobGraph (DAG of the job) which could be recognized by Flink’s execution framework. It should be noted that Python operators (Flink operators whose purpose is to execute Python user-defined functions) will be used to execute the Python user-defined functions.
@@ -1848,7 +1858,7 @@
 When the performance of Python UDF is close to that of Java UDF, the end-to-end performance of thread mode will be close to that of Java UDF.
 Summary &amp; Future work # In this article, we have introduced the &rsquo;thread&rsquo; execution mode in PyFlink which is a new feature introduced in Flink 1.15. Compared with the &lsquo;process&rsquo; execution mode, users will get better performance, lower latency, less checkpoint time in &rsquo;thread&rsquo; mode. However, there are also some limitations about &rsquo;thread&rsquo; mode, e.g. poor support for session deployment mode, etc.
 It should be noted that since this is still the first release of &rsquo;thread&rsquo; mode, currently there are still many limitations about it, e.g. it only supports Python ScalarFunction of Python Table API &amp; SQL. We&rsquo;re planning to extend it to other places where Python user-defined functions could be used in next releases.
-`}),e.add({id:109,href:"/2022/05/06/improvements-to-flink-operations-snapshots-ownership-and-savepoint-formats/",title:"Improvements to Flink operations: Snapshots Ownership and Savepoint Formats",section:"Flink Blog",content:`Flink has become a well established data streaming engine and a mature project requires some shifting of priorities from thinking purely about new features towards improving stability and operational simplicity. In the last couple of releases, the Flink community has tried to address some known friction points, which includes improvements to the snapshotting process. Snapshotting takes a global, consistent image of the state of a Flink job and is integral to fault-tolerance and exacty-once processing. Snapshots include savepoints and checkpoints.
+`}),e.add({id:110,href:"/2022/05/06/improvements-to-flink-operations-snapshots-ownership-and-savepoint-formats/",title:"Improvements to Flink operations: Snapshots Ownership and Savepoint Formats",section:"Flink Blog",content:`Flink has become a well established data streaming engine and a mature project requires some shifting of priorities from thinking purely about new features towards improving stability and operational simplicity. In the last couple of releases, the Flink community has tried to address some known friction points, which includes improvements to the snapshotting process. Snapshotting takes a global, consistent image of the state of a Flink job and is integral to fault-tolerance and exacty-once processing. Snapshots include savepoints and checkpoints.
 This post will outline the journey of improving snapshotting in past releases and the upcoming improvements in Flink 1.15, which includes making it possible to take savepoints in the native state backend specific format as well as clarifying snapshots ownership.
 Past improvements to the snapshotting process # Flink 1.13 was the first release where we announced unaligned checkpoints to be production-ready. We encouraged people to use them if their jobs are backpressured to a point where it causes issues for checkpoints. We also unified the binary format of savepoints across all different state backends, which enables stateful switching of savepoints.
 Flink 1.14 also brought additional improvements. As an alternative and as a complement to unaligned checkpoints, we introduced a feature called &ldquo;buffer debloating&rdquo;. This is built around the concept of automatically adjusting the amount of in-flight data that needs to be aligned while snapshotting. We also fixed another long-standing problem and made it possible to continue checkpointing even if there are finished tasks in a JobGraph.
@@ -1873,7 +1883,7 @@
 Having additional dimensions of properties in each of the two main snapshots category does not make it easier, therefore we try to list what you can achieve with every type of snapshot.
 The following table gives an overview of capabilities and limitations for the various types of savepoints and checkpoints.
 ✓ - Flink fully supports this type of snapshot x - Flink doesn&rsquo;t support this type of snapshot Operation Canonical Savepoint Native Savepoint Aligned Checkpoint Unaligned Checkpoint State backend change ✓ x x x State Processor API(writing) ✓ x x x State Processor API(reading) ✓ ✓ ✓ x Self-contained and relocatable ✓ ✓ x x Schema evolution ✓ ✓ ✓ ✓ Arbitrary job upgrade ✓ ✓ ✓ x Non-arbitrary job upgrade ✓ ✓ ✓ x Flink minor version upgrade ✓ ✓ ✓ x Flink bug/patch version upgrade ✓ ✓ ✓ ✓ Rescaling ✓ ✓ ✓ ✓ State backend change - you can restore from the snapshot with a different state.backend than the one for which the snapshot was taken State Processor API (writing) - The ability to create new snapshot via State Processor API. State Processor API (reading) - The ability to read state from the existing snapshot via State Processor API. Self-contained and relocatable - One snapshot directory contains everything it needs for recovery. You can move the directory around. Schema evolution - Changing the data type of the state in your UDFs. Arbitrary job upgrade - Restoring the snapshot with the different partitioning type(rescale, rebalance, map, etc.) or with a different record type for the existing operator. In other words you can add arbitrary operators anywhere in your job graph. Non-arbitrary job upgrade - In contrary to the above, you still should be able to add new operators, but certain limitations apply. You can not change partitioning for existing operators or the data type of records being exchanged. Flink minor version upgrade - Restoring a snapshot which was taken for an older minor version of Flink (1.x → 1.y). Flink bug/patch version upgrade - Restoring a snapshot which was taken for an older patch version of Flink (1.14.x → 1.14.y). Rescaling - Restoring the snapshot with a different parallelism than was used during the snapshot creation. 
Summary # We hope the changes we introduced over the last releases make it easier to operate Flink in respect to snapshotting. We are eager to hear from you if any of the new features have helped you solve problems you&rsquo;ve faced in the past. At the same time, if you still struggle with an issue or you had to work around some obstacles, please let us know! Maybe we will be able to incorporate your approach or find a different solution together.
-`}),e.add({id:110,href:"/2022/05/05/announcing-the-release-of-apache-flink-1.15/",title:"Announcing the Release of Apache Flink 1.15",section:"Flink Blog",content:`Thanks to our well-organized and open community, Apache Flink continues to grow as a technology and remain one of the most active projects in the Apache community. With the release of Flink 1.15, we are proud to announce a number of exciting changes.
+`}),e.add({id:111,href:"/2022/05/05/announcing-the-release-of-apache-flink-1.15/",title:"Announcing the Release of Apache Flink 1.15",section:"Flink Blog",content:`Thanks to our well-organized and open community, Apache Flink continues to grow as a technology and remain one of the most active projects in the Apache community. With the release of Flink 1.15, we are proud to announce a number of exciting changes.
 One of the main concepts that makes Apache Flink stand out is the unification of batch (aka bounded) and stream (aka unbounded) data processing, which helps reduce the complexity of development. A lot of effort went into this unification in the previous releases, and you can expect more efforts in this direction.
 Apache Flink is not only growing when it comes to contributions and users, but also out of the original use cases. We are seeing a trend towards more business/analytics use cases implemented in low-/no-code. Flink SQL is the feature in the Flink ecosystem that enables such uses cases and this is why its popularity continues to grow.
 Apache Flink is an essential building block in data pipelines/architectures and is used with many other technologies in order to drive all sorts of use cases. While new ideas/products may appear in this domain, existing technologies continue to establish themselves as standards for solving mission-critical problems. Knowing that we have such a wide reach and play a role in the success of many projects, it is important that the experience of integrating Apache Flink with the cloud infrastructures and existing systems is as seamless and easy as possible.
@@ -1928,7 +1938,7 @@
 Upgrade Notes # While we aim to make upgrades as smooth as possible, some of the changes require users to adjust some parts of the program when upgrading to Apache Flink 1.15. Please take a look at the release notes for a list of applicable adjustments and issues during upgrades. The one big thing worth mentioning when upgrading is the updated dependencies without the Scala version. Get the details here.
 List of Contributors # The Apache Flink community would like to thank each and every one of the contributors that have made this release possible:
 Ada Wong, Ahmed Hamdy, Aitozi, Alexander Fedulov, Alexander Preuß, Alexander Trushev, Ali Bahadir Zeybek, Anton Kalashnikov, Arvid Heise, Bernard Joseph Jean Bruno, Bo Cui, Brian Zhou, Camile, ChangLi, Chengkai Yang, Chesnay Schepler, Daisy T, Danny Cranmer, David Anderson, David Moravek, David N Perkins, Dawid Wysakowicz, Denis-Cosmin Nutiu, Dian Fu, Dong Lin, Eelis Kostiainen, Etienne Chauchot, Fabian Paul, Francesco Guardiani, Gabor Somogyi, Galen Warren, Gao Yun, Gen Luo, GitHub, Gyula Fora, Hang Ruan, Hangxiang Yu, Honnix, Horace Lee, Ingo Bürk, JIN FENG, Jack, Jane Chan, Jark Wu, JianZhangYang, Jiangjie (Becket) Qin, JianzhangYang, Jiayi Liao, Jing, Jing Ge, Jing Zhang, Jingsong Lee, JingsongLi, Jinzhong Li, Joao Boto, Joey Lee, John Karp, Jon Gillham, Jun Qin, Junfan Zhang, Juntao Hu, Kexin, Kexin Hui, Kirill Listopad, Konstantin Knauf, LB-Yu, Leonard Xu, Lijie Wang, Liu Jiangang, Maciej Bryński, Marios Trivyzas, MartijnVisser, Mason Chen, Matthias Pohl, Michal Ciesielczyk, Mika, Mika Naylor, Mrart, Mulavar, Nick Burkard, Nico Kruber, Nicolas Raga, Nicolaus Weidner, Niklas Semmler, Nikolay, Nuno Afonso, Oleg Smirnov, Paul Lin, Paul Zhang, PengFei Li, Piotr Nowojski, Px, Qingsheng Ren, Robert Metzger, Roc Marshal, Roman, Roman Khachatryan, Ruanshubin, Rudi Kershaw, Rui Li, Ryan Scudellari, Ryan Skraba, Sebastian Mattheis, Sergey, Sergey Nuyanzin, Shen Zhu, Shengkai, Shuo Cheng, Sike Bai, SteNicholas, Steffen Hausmann, Stephan Ewen, Tartarus0zm, Thesharing, Thomas Weise, Till Rohrmann, Timo Walther, Tony Wei, Victor Xu, Wenhao Ji, X-czh, Xianxun Ye, Xin Yu, Xinbin Huang, Xintong Song, Xuannan, Yang Wang, Yangze Guo, Yao Zhang, Yi Tang, Yibo Wen, Yuan Mei, Yuanhao Tian, Yubin Li, Yuepeng Pan, Yufan Sheng, Yufei Zhang, Yuhao Bi, Yun Gao, Yun Tang, Yuval Itzchakov, Yuxin Tan, Zakelly, Zhu Zhu, Zichen Liu, Zongwen Li, atptour2017, baisike, bgeng777, camilesing, chenxyz707, chenzihao, chuixue, dengziming, dijkwxyz, fanrui, fengli, fenyi, fornaix, gaurav726, 
godfrey he, godfreyhe, gongzhongqiang, haochenhao, hapihu, hehuiyuan, hongshuboy, huangxingbo, huweihua, iyupeng, jiaoqingbo, jinfeng, jxjgsylsg, kevin.cyj, kylewang, lbb, liliwei, liming.1018, lincoln lee, liufangqi, liujiangang, liushouwei, liuyongvs, lixiaobao14, lmagic233, lovewin99, lujiefsi, luoyuxia, lz, mans2singh, martijnvisser, mayue.fight, nanmu42, oogetyboogety, paul8263, pusheng.li01, qianchutao, realdengziqi, ruanhang1993, sammieliu, shammon, shihong90, shitou, shouweikun, shouzuo1, shuo.cs, siavash119, simenliuxing, sjwiesman, slankka, slinkydeveloper, snailHumming, snuyanzin, sujun, sujun1, syhily, tsreaper, txdong-sz, unknown, vahmed-hamdy, wangfeifan, wangpengcheng, wangyang0918, wangzhiwu, wangzhuo, wgzhao, wsz94, xiangqiao123, xmarker, xuyang, xuyu, xuzifu666, yangjunhan, yangze.gyz, ysymi, yuxia Luo, zhang chaoming, zhangchaoming, zhangjiaogg, zhangjingcun, zhangjun02, zhangmang, zlzhang0122, zoucao, zp, zzccctv, 周平, 子扬, 李锐, 蒋龙, 龙三, 庄天翼
-`}),e.add({id:111,href:"/2022/04/03/apache-flink-kubernetes-operator-0.1.0-release-announcement/",title:"Apache Flink Kubernetes Operator 0.1.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the preview release of the Apache Flink Kubernetes Operator (0.1.0)
+`}),e.add({id:112,href:"/2022/04/03/apache-flink-kubernetes-operator-0.1.0-release-announcement/",title:"Apache Flink Kubernetes Operator 0.1.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce the preview release of the Apache Flink Kubernetes Operator (0.1.0)
 The Flink Kubernetes Operator allows users to easily manage their Flink deployment lifecycle using native Kubernetes tooling.
 The operator takes care of submitting, savepointing, upgrading and generally managing Flink jobs using the built-in Flink Kubernetes integration. This way users do not have to use the Flink Clients (e.g. CLI) or interact with the Flink jobs manually, they only have to declare the desired deployment specification and the operator will take care of the rest. It also make it easier to integrate Flink job management with CI/CD tooling.
 Core Features
@@ -1948,7 +1958,7 @@
 For more details, check the updated documentation and the release notes. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA.
 List of Contributors # The Apache Flink community would like to thank each and every one of the contributors that have made this release possible:
 Aitozi, Biao Geng, Gyula Fora, Hao Xin, Jaegu Kim, Jaganathan Asokan, Junfan Zhang, Marton Balassi, Matyas Orhidi, Nicholas Jiang, Sandor Kelemen, Thomas Weise, Yang Wang, 愚鲤
-`}),e.add({id:112,href:"/2022/03/16/the-generic-asynchronous-base-sink/",title:"The Generic Asynchronous Base Sink",section:"Flink Blog",content:`Flink sinks share a lot of similar behavior. Most sinks batch records according to user-defined buffering hints, sign requests, write them to the destination, retry unsuccessful or throttled requests, and participate in checkpointing.
+`}),e.add({id:113,href:"/2022/03/16/the-generic-asynchronous-base-sink/",title:"The Generic Asynchronous Base Sink",section:"Flink Blog",content:`Flink sinks share a lot of similar behavior. Most sinks batch records according to user-defined buffering hints, sign requests, write them to the destination, retry unsuccessful or throttled requests, and participate in checkpointing.
 This is why for Flink 1.15 we have decided to create the AsyncSinkBase (FLIP-171), an abstract sink with a number of common functionalities extracted.
 This is a base implementation for asynchronous sinks, which you should use whenever you need to implement a sink that doesn&rsquo;t offer transactional capabilities. Adding support for a new destination now only requires a lightweight shim that implements the specific interfaces of the destination using a client that supports async requests.
 This common abstraction will reduce the effort required to maintain individual sinks that extend from this abstract sink, with bug fixes and improvements to the sink core benefiting all implementations that extend it. The design of AsyncSinkBase focuses on extensibility and a broad support of destinations. The core of the sink is kept generic and free of any connector-specific dependencies.
@@ -1973,11 +1983,11 @@
 CurrentSendTime Gauge - returns the amount of time in milliseconds it took for the most recent request to write records to complete, whether successful or not. NumBytesOut Counter - counts the total number of bytes the sink has tried to write to the destination, using the method getSizeInBytes to determine the size of each record. This will double count failures that may need to be retried. NumRecordsOut Counter - similar to above, this counts the total number of records the sink has tried to write to the destination. This will double count failures that may need to be retried. Sink Behavior # There are six sink configuration settings that control the buffering, flushing, and retry behavior of the sink.
 int maxBatchSize - maximum number of elements that may be passed in the list to submitRequestEntries to be written downstream. int maxInFlightRequests - maximum number of uncompleted calls to submitRequestEntries that the SinkWriter will allow at any given point. Once this point has reached, writes and callbacks to add elements to the buffer may block until one or more requests to submitRequestEntries completes. int maxBufferedRequests - maximum buffer length. Callbacks to add elements to the buffer and calls to write will block if this length has been reached and will only unblock if elements from the buffer have been removed for flushing. long maxBatchSizeInBytes - a flush will be attempted if the most recent call to write introduces an element to the buffer such that the total size of the buffer is greater than or equal to this threshold value. long maxTimeInBufferMS - maximum amount of time an element may remain in the buffer. In most cases elements are flushed as a result of the batch size (in bytes or number) being reached or during a snapshot. However, there are scenarios where an element may remain in the buffer forever or a long period of time. To mitigate this, a timer is constantly active in the buffer such that: while the buffer is not empty, it will flush every maxTimeInBufferMS milliseconds. long maxRecordSizeInBytes - maximum size in bytes allowed for a single record, as determined by getSizeInBytes(). Destinations typically have a defined throughput limit and will begin throttling or rejecting requests once near. We employ Additive Increase Multiplicative Decrease (AIMD) as a strategy for selecting the optimal batch size.
 Summary # The AsyncSinkBase is a new abstraction that makes creating and maintaining async sinks easier. This will be available in Flink 1.15 and we hope that you will try it out and give us feedback on it.
-`}),e.add({id:113,href:"/2022/03/11/apache-flink-1.14.4-release-announcement/",title:"Apache Flink 1.14.4 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce another bug fix release for Flink 1.14.
+`}),e.add({id:114,href:"/2022/03/11/apache-flink-1.14.4-release-announcement/",title:"Apache Flink 1.14.4 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce another bug fix release for Flink 1.14.
 This release includes 51 bug and vulnerability fixes and minor improvements for Flink 1.14. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
 We highly recommend all users to upgrade to Flink 1.14.4.
 Release Artifacts # Maven Dependencies # &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.14.4&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.14.4&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.14.4&lt;/version&gt; &lt;/dependency&gt; Binaries # You can find the binaries on the updated Downloads page.
-Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.14.4 Release Notes # Sub-task [FLINK-21788] - Throw PartitionNotFoundException if the partition file has been lost for blocking shuffle [FLINK-24954] - Reset read buffer request timeout on buffer recycling for sort-shuffle [FLINK-25653] - Move buffer recycle in SortMergeSubpartitionReader out of lock to avoid deadlock [FLINK-25654] - Remove the redundant lock in SortMergeResultPartition [FLINK-25879] - Track used search terms in Matomo [FLINK-25880] - Implement Matomo in Flink documentation Bug [FLINK-21752] - NullPointerException on restore in PojoSerializer [FLINK-23946] - Application mode fails fatally when being shut down [FLINK-24334] - Configuration kubernetes.flink.log.dir not working [FLINK-24407] - Pulsar connector chinese document link to Pulsar document location incorrectly. [FLINK-24607] - SourceCoordinator may miss to close SplitEnumerator when failover frequently [FLINK-25171] - When the DDL statement was executed, the column names of the Derived Columns were not validated [FLINK-25199] - StreamEdges are not unique in self-union, which blocks propagation of watermarks [FLINK-25362] - Incorrect dependencies in Table Confluent/Avro docs [FLINK-25407] - Network stack deadlock when cancellation happens during initialisation [FLINK-25466] - TTL configuration could parse in StateTtlConfig#DISABLED [FLINK-25486] - Perjob can not recover from checkpoint when zookeeper leader changes [FLINK-25494] - Duplicate element serializer during DefaultOperatorStateBackendSnapshotStrategy#syncPrepareResources [FLINK-25678] - TaskExecutorStateChangelogStoragesManager.shutdown is not thread-safe [FLINK-25683] - wrong result if table transfrom to DataStream then window process in batch mode [FLINK-25728] - Potential memory leaks in StreamMultipleInputProcessor [FLINK-25732] - Dispatcher#requestMultipleJobDetails returns non-serialiable collection [FLINK-25827] - Potential 
memory leaks in SourceOperator [FLINK-25856] - Fix use of UserDefinedType in from_elements [FLINK-25883] - The value of DEFAULT_BUNDLE_PROCESSOR_CACHE_SHUTDOWN_THRESHOLD_S is too large [FLINK-25893] - ResourceManagerServiceImpl&#39;s lifecycle can lead to exceptions [FLINK-25952] - Savepoint on S3 are not relocatable even if entropy injection is not enabled [FLINK-26039] - Incorrect value getter in map unnest table function [FLINK-26159] - Pulsar Connector: should add description MAX_FETCH_RECORD in doc to explain slow consumption [FLINK-26160] - Pulsar Connector: stopCursor description should be changed. Connector only stop when auto discovery is disabled. [FLINK-26187] - Chinese docs override english aliases [FLINK-26304] - GlobalCommitter can receive failed committables New Feature [FLINK-20188] - Add Documentation for new File Source [FLINK-21407] - Clarify which sources and APIs support which formats Improvement [FLINK-20830] - Add a type of HEADLESS_CLUSTER_IP for rest service type [FLINK-24880] - Error messages &quot;OverflowError: timeout value is too large&quot; shown when executing PyFlink jobs [FLINK-25160] - Make doc clear: tolerable-failed-checkpoints counts consecutive failures [FLINK-25611] - Remove CoordinatorExecutorThreadFactory thread creation guards [FLINK-25650] - Document unaligned checkpoints performance limitations (larger records/flat map/timers/...) [FLINK-25767] - Translation of page &#39;Working with State&#39; is incomplete [FLINK-25818] - Add explanation how Kafka Source deals with idleness when parallelism is higher then the number of partitions Technical Debt [FLINK-25576] - Update com.h2database:h2 to 2.0.206 [FLINK-25785] - Update com.h2database:h2 to 2.0.210 `}),e.add({id:114,href:"/2022/02/22/scala-free-in-one-fifteen/",title:"Scala Free in One Fifteen",section:"Flink Blog",content:`Flink 1.15 is right around the corner, and among the many improvements is a Scala free classpath. 
Users can now leverage the Java API from any Scala version, including Scala 3!
+Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.14.4 Release Notes # Sub-task [FLINK-21788] - Throw PartitionNotFoundException if the partition file has been lost for blocking shuffle [FLINK-24954] - Reset read buffer request timeout on buffer recycling for sort-shuffle [FLINK-25653] - Move buffer recycle in SortMergeSubpartitionReader out of lock to avoid deadlock [FLINK-25654] - Remove the redundant lock in SortMergeResultPartition [FLINK-25879] - Track used search terms in Matomo [FLINK-25880] - Implement Matomo in Flink documentation Bug [FLINK-21752] - NullPointerException on restore in PojoSerializer [FLINK-23946] - Application mode fails fatally when being shut down [FLINK-24334] - Configuration kubernetes.flink.log.dir not working [FLINK-24407] - Pulsar connector chinese document link to Pulsar document location incorrectly. [FLINK-24607] - SourceCoordinator may miss to close SplitEnumerator when failover frequently [FLINK-25171] - When the DDL statement was executed, the column names of the Derived Columns were not validated [FLINK-25199] - StreamEdges are not unique in self-union, which blocks propagation of watermarks [FLINK-25362] - Incorrect dependencies in Table Confluent/Avro docs [FLINK-25407] - Network stack deadlock when cancellation happens during initialisation [FLINK-25466] - TTL configuration could parse in StateTtlConfig#DISABLED [FLINK-25486] - Perjob can not recover from checkpoint when zookeeper leader changes [FLINK-25494] - Duplicate element serializer during DefaultOperatorStateBackendSnapshotStrategy#syncPrepareResources [FLINK-25678] - TaskExecutorStateChangelogStoragesManager.shutdown is not thread-safe [FLINK-25683] - wrong result if table transfrom to DataStream then window process in batch mode [FLINK-25728] - Potential memory leaks in StreamMultipleInputProcessor [FLINK-25732] - Dispatcher#requestMultipleJobDetails returns non-serialiable collection [FLINK-25827] - Potential 
memory leaks in SourceOperator [FLINK-25856] - Fix use of UserDefinedType in from_elements [FLINK-25883] - The value of DEFAULT_BUNDLE_PROCESSOR_CACHE_SHUTDOWN_THRESHOLD_S is too large [FLINK-25893] - ResourceManagerServiceImpl&#39;s lifecycle can lead to exceptions [FLINK-25952] - Savepoint on S3 are not relocatable even if entropy injection is not enabled [FLINK-26039] - Incorrect value getter in map unnest table function [FLINK-26159] - Pulsar Connector: should add description MAX_FETCH_RECORD in doc to explain slow consumption [FLINK-26160] - Pulsar Connector: stopCursor description should be changed. Connector only stop when auto discovery is disabled. [FLINK-26187] - Chinese docs override english aliases [FLINK-26304] - GlobalCommitter can receive failed committables New Feature [FLINK-20188] - Add Documentation for new File Source [FLINK-21407] - Clarify which sources and APIs support which formats Improvement [FLINK-20830] - Add a type of HEADLESS_CLUSTER_IP for rest service type [FLINK-24880] - Error messages &quot;OverflowError: timeout value is too large&quot; shown when executing PyFlink jobs [FLINK-25160] - Make doc clear: tolerable-failed-checkpoints counts consecutive failures [FLINK-25611] - Remove CoordinatorExecutorThreadFactory thread creation guards [FLINK-25650] - Document unaligned checkpoints performance limitations (larger records/flat map/timers/...) [FLINK-25767] - Translation of page &#39;Working with State&#39; is incomplete [FLINK-25818] - Add explanation how Kafka Source deals with idleness when parallelism is higher then the number of partitions Technical Debt [FLINK-25576] - Update com.h2database:h2 to 2.0.206 [FLINK-25785] - Update com.h2database:h2 to 2.0.210 `}),e.add({id:115,href:"/2022/02/22/scala-free-in-one-fifteen/",title:"Scala Free in One Fifteen",section:"Flink Blog",content:`Flink 1.15 is right around the corner, and among the many improvements is a Scala free classpath. 
Users can now leverage the Java API from any Scala version, including Scala 3!
 Fig.1 Flink 1.15 Scala 3 Example This blog will discuss what has historically made supporting multiple Scala versions so complex, how we achieved this milestone, and the future of Scala in Apache Flink.
 TLDR: All Scala dependencies are now isolated to the flink-scala jar. To remove Scala from the user-code classpath, remove this jar from the lib directory of the Flink distribution. $ rm flink-dist/lib/flink-scala* The Classpath and Scala # If you have worked with a JVM-based application, you have probably heard the term classpath. The classpath defines where the JVM will search for a given classfile when it needs to be loaded. There may only be one instance of a classfile on each classpath, forcing any dependency Flink exposes onto users. That is why the Flink community works hard to keep our classpath &ldquo;clean&rdquo; - or free of unnecessary dependencies. We achieve this through a combination of shaded dependencies, child first class loading, and a plugins abstraction for optional components.
 The Apache Flink runtime is primarily written in Java but contains critical components that forced Scala on the default classpath. And because Scala does not maintain binary compatibility across minor releases, this historically required cross-building components for all versions of Scala. But due to many reasons - breaking changes in the compiler, a new standard library, and a reworked macro system - this was easier said than done.
@@ -1996,11 +2006,11 @@
 https://github.com/ariskk/flink4s&#160;&#x21a9;&#xfe0e;
 https://github.com/flink-extended/flink-scala-api&#160;&#x21a9;&#xfe0e;
 https://github.com/sjwiesman/flink-scala-3&#160;&#x21a9;&#xfe0e;
-`}),e.add({id:115,href:"/2022/02/18/apache-flink-1.13.6-release-announcement/",title:"Apache Flink 1.13.6 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce another bug fix release for Flink 1.13.
+`}),e.add({id:116,href:"/2022/02/18/apache-flink-1.13.6-release-announcement/",title:"Apache Flink 1.13.6 Release Announcement",section:"Flink Blog",content:`The Apache Flink Community is pleased to announce another bug fix release for Flink 1.13.
 This release includes 99 bug and vulnerability fixes and minor improvements for Flink 1.13 including another upgrade of Apache Log4j (to 2.17.1). Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
 We highly recommend all users to upgrade to Flink 1.13.6.
 Release Artifacts # Maven Dependencies # &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.13.6&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.13.6&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.13.6&lt;/version&gt; &lt;/dependency&gt; Binaries # You can find the binaries on the updated Downloads page.
-Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.13.6 Release Notes # Bug [FLINK-15987] - SELECT 1.0e0 / 0.0e0 throws NumberFormatException [FLINK-17914] - HistoryServer deletes cached archives if archive listing fails [FLINK-20195] - Jobs endpoint returns duplicated jobs [FLINK-20370] - Result is wrong when sink primary key is not the same with query [FLINK-21289] - Application mode ignores the pipeline.classpaths configuration [FLINK-23919] - PullUpWindowTableFunctionIntoWindowAggregateRule generates invalid Calc for Window TVF [FLINK-24232] - Archiving of suspended jobs prevents breaks subsequent archive attempts [FLINK-24255] - Test Environment / Mini Cluster do not forward configuration. [FLINK-24310] - A bug in the BufferingSink example in the doc [FLINK-24318] - Casting a number to boolean has different results between &#39;select&#39; fields and &#39;where&#39; condition [FLINK-24334] - Configuration kubernetes.flink.log.dir not working [FLINK-24366] - Unnecessary/misleading error message about failing restores when tasks are already canceled. 
[FLINK-24401] - TM cannot exit after Metaspace OOM [FLINK-24465] - Wrong javadoc and documentation for buffer timeout [FLINK-24492] - incorrect implicit type conversion between numeric and (var)char [FLINK-24506] - checkpoint directory is not configurable through the Flink configuration passed into the StreamExecutionEnvironment [FLINK-24509] - FlinkKafkaProducer example is not compiling due to incorrect constructer signature used [FLINK-24540] - Fix Resource leak due to Files.list [FLINK-24543] - Zookeeper connection issue causes inconsistent state in Flink [FLINK-24563] - Comparing timstamp_ltz with random string throws NullPointerException [FLINK-24597] - RocksdbStateBackend getKeysAndNamespaces would return duplicate data when using MapState [FLINK-24621] - JobManager fails to recover 1.13.1 checkpoint due to InflightDataRescalingDescriptor [FLINK-24662] - PyFlink sphinx check failed with &quot;node class &#39;meta&#39; is already registered, its visitors will be overridden&quot; [FLINK-24667] - Channel state writer would fail the task directly if meeting exception previously [FLINK-24676] - Schema does not match if explain insert statement with partial column [FLINK-24678] - Correct the metric name of map state contains latency [FLINK-24708] - \`ConvertToNotInOrInRule\` has a bug which leads to wrong result [FLINK-24728] - Batch SQL file sink forgets to close the output stream [FLINK-24761] - Fix PartitionPruner code gen compile fail [FLINK-24846] - AsyncWaitOperator fails during stop-with-savepoint [FLINK-24860] - Fix the wrong position mappings in the Python UDTF [FLINK-24885] - ProcessElement Interface parameter Collector : java.lang.NullPointerException [FLINK-24922] - Fix spelling errors in the word &quot;parallism&quot; [FLINK-25022] - ClassLoader leak with ThreadLocals on the JM when submitting a job through the REST API [FLINK-25067] - Correct the description of RocksDB&#39;s background threads [FLINK-25084] - Field names must be unique. 
Found duplicates [FLINK-25091] - Official website document FileSink orc compression attribute reference error [FLINK-25096] - Issue in exceptions API(/jobs/:jobid/exceptions) in flink 1.13.2 [FLINK-25199] - StreamEdges are not unique in self-union, which blocks propagation of watermarks [FLINK-25362] - Incorrect dependencies in Table Confluent/Avro docs [FLINK-25468] - Local recovery fails if local state storage and RocksDB working directory are not on the same volume [FLINK-25486] - Perjob can not recover from checkpoint when zookeeper leader changes [FLINK-25494] - Duplicate element serializer during DefaultOperatorStateBackendSnapshotStrategy#syncPrepareResources [FLINK-25513] - CoFlatMapFunction requires both two flat_maps to yield something [FLINK-25559] - SQL JOIN causes data loss [FLINK-25683] - wrong result if table transfrom to DataStream then window process in batch mode [FLINK-25728] - Potential memory leaks in StreamMultipleInputProcessor [FLINK-25732] - Dispatcher#requestMultipleJobDetails returns non-serialiable collection Improvement [FLINK-21407] - Clarify which sources and APIs support which formats [FLINK-20443] - ContinuousProcessingTimeTrigger doesn&#39;t fire at the end of the window [FLINK-21467] - Document possible recommended usage of Bounded{One/Multi}Input.endInput and emphasize that they could be called multiple times [FLINK-23842] - Add log messages for reader registrations and split requests. 
[FLINK-24631] - Avoiding directly use the labels as selector for deployment and service [FLINK-24739] - State requirements for Flink&#39;s application mode in the documentation [FLINK-24987] - Enhance ExternalizedCheckpointCleanup enum [FLINK-25160] - Make doc clear: tolerable-failed-checkpoints counts consecutive failures [FLINK-25415] - implement retrial on connections to Cassandra container [FLINK-25611] - Remove CoordinatorExecutorThreadFactory thread creation guards [FLINK-25818] - Add explanation how Kafka Source deals with idleness when parallelism is higher then the number of partitions Technical Debt [FLINK-24740] - Update testcontainers dependency to v1.16.2 [FLINK-24796] - Exclude javadocs / node[_modules] directories from CI compile artifact [FLINK-25472] - Update to Log4j 2.17.1 [FLINK-25375] - Update Log4j to 2.17.0 [FLINK-25576] - Update com.h2database:h2 to 2.0.206 `}),e.add({id:116,href:"/2022/01/31/stateful-functions-3.2.0-release-announcement/",title:"Stateful Functions 3.2.0 Release Announcement",section:"Flink Blog",content:`Stateful Functions is a cross-platform stack for building Stateful Serverless applications, making it radically simpler to develop scalable, consistent, and elastic distributed applications. This new release brings various improvements to the StateFun runtime, a leaner way to specify StateFun module components, and a brand new JavaScript SDK!
+Docker Images # library/flink (official images) apache/flink (ASF repository) PyPi # apache-flink==1.13.6 Release Notes # Bug [FLINK-15987] - SELECT 1.0e0 / 0.0e0 throws NumberFormatException [FLINK-17914] - HistoryServer deletes cached archives if archive listing fails [FLINK-20195] - Jobs endpoint returns duplicated jobs [FLINK-20370] - Result is wrong when sink primary key is not the same with query [FLINK-21289] - Application mode ignores the pipeline.classpaths configuration [FLINK-23919] - PullUpWindowTableFunctionIntoWindowAggregateRule generates invalid Calc for Window TVF [FLINK-24232] - Archiving of suspended jobs prevents breaks subsequent archive attempts [FLINK-24255] - Test Environment / Mini Cluster do not forward configuration. [FLINK-24310] - A bug in the BufferingSink example in the doc [FLINK-24318] - Casting a number to boolean has different results between &#39;select&#39; fields and &#39;where&#39; condition [FLINK-24334] - Configuration kubernetes.flink.log.dir not working [FLINK-24366] - Unnecessary/misleading error message about failing restores when tasks are already canceled. 
[FLINK-24401] - TM cannot exit after Metaspace OOM [FLINK-24465] - Wrong javadoc and documentation for buffer timeout [FLINK-24492] - incorrect implicit type conversion between numeric and (var)char [FLINK-24506] - checkpoint directory is not configurable through the Flink configuration passed into the StreamExecutionEnvironment [FLINK-24509] - FlinkKafkaProducer example is not compiling due to incorrect constructer signature used [FLINK-24540] - Fix Resource leak due to Files.list [FLINK-24543] - Zookeeper connection issue causes inconsistent state in Flink [FLINK-24563] - Comparing timstamp_ltz with random string throws NullPointerException [FLINK-24597] - RocksdbStateBackend getKeysAndNamespaces would return duplicate data when using MapState [FLINK-24621] - JobManager fails to recover 1.13.1 checkpoint due to InflightDataRescalingDescriptor [FLINK-24662] - PyFlink sphinx check failed with &quot;node class &#39;meta&#39; is already registered, its visitors will be overridden&quot; [FLINK-24667] - Channel state writer would fail the task directly if meeting exception previously [FLINK-24676] - Schema does not match if explain insert statement with partial column [FLINK-24678] - Correct the metric name of map state contains latency [FLINK-24708] - \`ConvertToNotInOrInRule\` has a bug which leads to wrong result [FLINK-24728] - Batch SQL file sink forgets to close the output stream [FLINK-24761] - Fix PartitionPruner code gen compile fail [FLINK-24846] - AsyncWaitOperator fails during stop-with-savepoint [FLINK-24860] - Fix the wrong position mappings in the Python UDTF [FLINK-24885] - ProcessElement Interface parameter Collector : java.lang.NullPointerException [FLINK-24922] - Fix spelling errors in the word &quot;parallism&quot; [FLINK-25022] - ClassLoader leak with ThreadLocals on the JM when submitting a job through the REST API [FLINK-25067] - Correct the description of RocksDB&#39;s background threads [FLINK-25084] - Field names must be unique. 
Found duplicates [FLINK-25091] - Official website document FileSink orc compression attribute reference error [FLINK-25096] - Issue in exceptions API(/jobs/:jobid/exceptions) in flink 1.13.2 [FLINK-25199] - StreamEdges are not unique in self-union, which blocks propagation of watermarks [FLINK-25362] - Incorrect dependencies in Table Confluent/Avro docs [FLINK-25468] - Local recovery fails if local state storage and RocksDB working directory are not on the same volume [FLINK-25486] - Perjob can not recover from checkpoint when zookeeper leader changes [FLINK-25494] - Duplicate element serializer during DefaultOperatorStateBackendSnapshotStrategy#syncPrepareResources [FLINK-25513] - CoFlatMapFunction requires both two flat_maps to yield something [FLINK-25559] - SQL JOIN causes data loss [FLINK-25683] - wrong result if table transfrom to DataStream then window process in batch mode [FLINK-25728] - Potential memory leaks in StreamMultipleInputProcessor [FLINK-25732] - Dispatcher#requestMultipleJobDetails returns non-serialiable collection Improvement [FLINK-21407] - Clarify which sources and APIs support which formats [FLINK-20443] - ContinuousProcessingTimeTrigger doesn&#39;t fire at the end of the window [FLINK-21467] - Document possible recommended usage of Bounded{One/Multi}Input.endInput and emphasize that they could be called multiple times [FLINK-23842] - Add log messages for reader registrations and split requests. 
[FLINK-24631] - Avoiding directly use the labels as selector for deployment and service [FLINK-24739] - State requirements for Flink&#39;s application mode in the documentation [FLINK-24987] - Enhance ExternalizedCheckpointCleanup enum [FLINK-25160] - Make doc clear: tolerable-failed-checkpoints counts consecutive failures [FLINK-25415] - implement retrial on connections to Cassandra container [FLINK-25611] - Remove CoordinatorExecutorThreadFactory thread creation guards [FLINK-25818] - Add explanation how Kafka Source deals with idleness when parallelism is higher then the number of partitions Technical Debt [FLINK-24740] - Update testcontainers dependency to v1.16.2 [FLINK-24796] - Exclude javadocs / node[_modules] directories from CI compile artifact [FLINK-25472] - Update to Log4j 2.17.1 [FLINK-25375] - Update Log4j to 2.17.0 [FLINK-25576] - Update com.h2database:h2 to 2.0.206 `}),e.add({id:117,href:"/2022/01/31/stateful-functions-3.2.0-release-announcement/",title:"Stateful Functions 3.2.0 Release Announcement",section:"Flink Blog",content:`Stateful Functions is a cross-platform stack for building Stateful Serverless applications, making it radically simpler to develop scalable, consistent, and elastic distributed applications. This new release brings various improvements to the StateFun runtime, a leaner way to specify StateFun module components, and a brand new JavaScript SDK!
 The binary distribution and source artifacts are now available on the updated Downloads page of the Flink website, and the most recent Java SDK, Python SDK,, GoLang SDK and JavaScript SDK distributions are available on Maven, PyPI, Github, and npm respectively. You can also find official StateFun Docker images of the new version on Dockerhub.
 For more details, check the complete release changelog and the updated documentation. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA!
 New Features # A brand new JavaScript SDK for NodeJS # Stateful Functions provides a unified model for building stateful applications across various programming languages and deployment environments. The community is thrilled to release an official JavaScript SDK as part of the 3.2.0 release.
@@ -2016,7 +2026,7 @@
 Release Notes # Please review the release notes for a detailed list of changes and new features if you plan to upgrade your setup to Stateful Functions 3.2.0.
 List of Contributors # Seth Wiesman, Igal Shilman, Till Rohrmann, Stephan Ewen, Tzu-Li (Gordon) Tai, Ingo Bürk, Evans Ye, neoXfire, Galen Warren
 If you’d like to get involved, we’re always looking for new contributors.
-`}),e.add({id:117,href:"/2022/01/20/pravega-flink-connector-101/",title:"Pravega Flink Connector 101",section:"Flink Blog",content:`Pravega, which is now a CNCF sandbox project, is a cloud-native storage system based on abstractions for both batch and streaming data consumption. Pravega streams (a new storage abstraction) are durable, consistent, and elastic, while natively supporting long-term data retention. In comparison, Apache Flink is a popular real-time computing engine that provides unified batch and stream processing. Flink provides high-throughput, low-latency computation, as well as support for complex event processing and state management. Both Pravega and Flink share the same design philosophy and treat data streams as primitives. This makes them a great match when constructing storage+computing data pipelines which can unify batch and streaming use cases.
+`}),e.add({id:118,href:"/2022/01/20/pravega-flink-connector-101/",title:"Pravega Flink Connector 101",section:"Flink Blog",content:`Pravega, which is now a CNCF sandbox project, is a cloud-native storage system based on abstractions for both batch and streaming data consumption. Pravega streams (a new storage abstraction) are durable, consistent, and elastic, while natively supporting long-term data retention. In comparison, Apache Flink is a popular real-time computing engine that provides unified batch and stream processing. Flink provides high-throughput, low-latency computation, as well as support for complex event processing and state management. Both Pravega and Flink share the same design philosophy and treat data streams as primitives. This makes them a great match when constructing storage+computing data pipelines which can unify batch and streaming use cases.
 That&rsquo;s also the main reason why Pravega has chosen to use Flink as the first integrated execution engine among the various distributed computing engines on the market. With the help of Flink, users can use flexible APIs for windowing, complex event processing (CEP), or table abstractions to process streaming data easily and enrich the data being stored. Since its inception in 2016, Pravega has established communication with Flink PMC members and developed the connector together.
 In 2017, the Pravega Flink connector module started to move out of the Pravega main repository and has been maintained in a new separate repository since then. During years of development, many features have been implemented, including:
 exactly-once processing guarantees for both Reader and Writer, supporting end-to-end exactly-once processing pipelines seamless integration with Flink&rsquo;s checkpoints and savepoints parallel Readers and Writers supporting high throughput and low latency processing support for Batch, Streaming, and Table API to access Pravega Streams These key features make streaming pipeline applications easier to develop without worrying about performance and correctness which are the common pain points for many streaming use cases.
@@ -2057,12 +2067,12 @@
 Future plans # FlinkPravegaInputFormat and FlinkPravegaOutputFormat are now provided to support batch reads and writes in Flink, but these are under the legacy DataSet API. Since Flink is now making efforts to unify batch and streaming, it is improving its APIs and providing new interfaces for the source and sink APIs in the Flink 1.11 and 1.12 releases. We will continue to work with the Flink community and integrate with the new APIs.
 We will also put more effort into SQL / Table API support in order to provide a better user experience since it is simpler to understand and even more powerful to use in some cases.
 Note: the original blog post can be found here.
-`}),e.add({id:118,href:"/2022/01/17/apache-flink-1.14.3-release-announcement/",title:"Apache Flink 1.14.3 Release Announcement",section:"Flink Blog",content:`The Apache Flink community released the second bugfix version of the Apache Flink 1.14 series. The first bugfix release was 1.14.2, being an emergency release due to an Apache Log4j Zero Day (CVE-2021-44228). Flink 1.14.1 was abandoned. That means that this Flink release is the first bugfix release of the Flink 1.14 series which contains bugfixes not related to the mentioned CVE.
+`}),e.add({id:119,href:"/2022/01/17/apache-flink-1.14.3-release-announcement/",title:"Apache Flink 1.14.3 Release Announcement",section:"Flink Blog",content:`The Apache Flink community released the second bugfix version of the Apache Flink 1.14 series. The first bugfix release was 1.14.2, being an emergency release due to an Apache Log4j Zero Day (CVE-2021-44228). Flink 1.14.1 was abandoned. That means that this Flink release is the first bugfix release of the Flink 1.14 series which contains bugfixes not related to the mentioned CVE.
 This release includes 164 fixes and minor improvements for Flink 1.14.0. The list below includes bugfixes and improvements. For a complete list of all changes see: JIRA.
 We highly recommend all users to upgrade to Flink 1.14.3.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.14.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.14.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.14.3&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
-Release Notes - Flink - Version 1.14.3 Sub-task [FLINK-24355] - Expose the flag for enabling checkpoints after tasks finish in the Web UI Bug [FLINK-15987] - SELECT 1.0e0 / 0.0e0 throws NumberFormatException [FLINK-17914] - HistoryServer deletes cached archives if archive listing fails [FLINK-19142] - Local recovery can be broken if slot hijacking happened during a full restart [FLINK-20195] - Jobs endpoint returns duplicated jobs [FLINK-20370] - Result is wrong when sink primary key is not the same with query [FLINK-21289] - Application mode ignores the pipeline.classpaths configuration [FLINK-21345] - NullPointerException LogicalCorrelateToJoinFromTemporalTableFunctionRule.scala:157 [FLINK-22113] - UniqueKey constraint is lost with multiple sources join in SQL [FLINK-22954] - Don&#39;t support consuming update and delete changes when use table function that does not contain table field [FLINK-23614] - The resulting scale of TRUNCATE(DECIMAL, ...) is not correct [FLINK-23704] - FLIP-27 sources are not generating LatencyMarkers [FLINK-23827] - Fix ModifiedMonotonicity inference for some node [FLINK-23919] - PullUpWindowTableFunctionIntoWindowAggregateRule generates invalid Calc for Window TVF [FLINK-24156] - BlobServer crashes due to SocketTimeoutException in Java 11 [FLINK-24232] - Archiving of suspended jobs prevents breaks subsequent archive attempts [FLINK-24291] - Decimal precision is lost when deserializing in test cases [FLINK-24310] - A bug in the BufferingSink example in the doc [FLINK-24315] - Cannot rebuild watcher thread while the K8S API server is unavailable [FLINK-24318] - Casting a number to boolean has different results between &#39;select&#39; fields and &#39;where&#39; condition [FLINK-24331] - PartiallyFinishedSourcesITCase fails with &quot;No downstream received 0 from xxx;&quot; [FLINK-24336] - PyFlink TableEnvironment executes the SQL randomly MalformedURLException with the configuration for &#39;pipeline.classpaths&#39; [FLINK-24344] - 
Handling of IOExceptions when triggering checkpoints doesn&#39;t cause job failover [FLINK-24353] - Bash scripts do not respect dynamic configurations when calculating memory sizes [FLINK-24366] - Unnecessary/misleading error message about failing restores when tasks are already canceled. [FLINK-24371] - Support SinkWriter preCommit without the need of a committer [FLINK-24377] - TM resource may not be properly released after heartbeat timeout [FLINK-24380] - Flink should handle the state transition of the pod from Pending to Failed [FLINK-24381] - Table API exceptions may leak sensitive configuration values [FLINK-24401] - TM cannot exit after Metaspace OOM [FLINK-24407] - Pulsar connector chinese document link to Pulsar document location incorrectly. [FLINK-24408] - org.codehaus.janino.InternalCompilerException: Compiling &quot;StreamExecValues$200&quot;: Code of method &quot;nextRecord(Ljava/lang/Object;)Ljava/lang/Object;&quot; of class &quot;StreamExecValues$200&quot; grows beyond 64 KB [FLINK-24409] - Kafka topics with periods in their names generate a constant stream of errors [FLINK-24431] - [Kinesis][EFO] EAGER registration strategy does not work when job fails over [FLINK-24432] - RocksIteratorWrapper.seekToLast() calls the wrong RocksIterator method [FLINK-24465] - Wrong javadoc and documentation for buffer timeout [FLINK-24467] - Set min and max buffer size even if the difference less than threshold [FLINK-24468] - NPE when notifyNewBufferSize [FLINK-24469] - Incorrect calcualtion of the buffer size in case of channel data skew [FLINK-24480] - EqualiserCodeGeneratorTest fails on azure [FLINK-24488] - KafkaRecordSerializationSchemaBuilder does not forward timestamp [FLINK-24492] - incorrect implicit type conversion between numeric and (var)char [FLINK-24506] - checkpoint directory is not configurable through the Flink configuration passed into the StreamExecutionEnvironment [FLINK-24540] - Fix Resource leak due to Files.list [FLINK-24543] - Zookeeper 
connection issue causes inconsistent state in Flink [FLINK-24550] - Can not access job information from a standby jobmanager UI [FLINK-24551] - BUFFER_DEBLOAT_SAMPLES property is taken from the wrong configuration [FLINK-24552] - Ineffective buffer debloat configuration in randomized tests [FLINK-24563] - Comparing timstamp_ltz with random string throws NullPointerException [FLINK-24596] - Bugs in sink.buffer-flush before upsert-kafka [FLINK-24597] - RocksdbStateBackend getKeysAndNamespaces would return duplicate data when using MapState [FLINK-24600] - Duplicate 99th percentile displayed in checkpoint summary [FLINK-24608] - Sinks built with the unified sink framework do not receive timestamps when used in Table API [FLINK-24613] - Documentation on orc supported data types is outdated [FLINK-24647] - ClusterUncaughtExceptionHandler does not log the exception [FLINK-24654] - NPE on RetractableTopNFunction when some records were cleared by state ttl [FLINK-24662] - PyFlink sphinx check failed with &quot;node class &#39;meta&#39; is already registered, its visitors will be overridden&quot; [FLINK-24667] - Channel state writer would fail the task directly if meeting exception previously [FLINK-24676] - Schema does not match if explain insert statement with partial column [FLINK-24678] - Correct the metric name of map state contains latency [FLINK-24691] - FLINK SQL SUM() causes a precision error [FLINK-24704] - Exception occurs when the input record loses monotonicity on the sort key field of UpdatableTopNFunction [FLINK-24706] - AkkaInvocationHandler silently ignores deserialization errors [FLINK-24708] - \`ConvertToNotInOrInRule\` has a bug which leads to wrong result [FLINK-24728] - Batch SQL file sink forgets to close the output stream [FLINK-24733] - Data loss in pulsar source when using shared mode [FLINK-24738] - Fail during announcing buffer size to released local channel [FLINK-24761] - Fix PartitionPruner code gen compile fail [FLINK-24773] - KafkaCommitter 
should fail on unknown Exception [FLINK-24777] - Processed (persisted) in-flight data description miss on Monitoring Checkpointing page [FLINK-24789] - IllegalStateException with CheckpointCleaner being closed already [FLINK-24792] - OperatorCoordinatorSchedulerTest crashed JVM on AZP [FLINK-24835] - &quot;group by&quot; in the interval join will throw a exception [FLINK-24846] - AsyncWaitOperator fails during stop-with-savepoint [FLINK-24858] - TypeSerializer version mismatch during eagerly restore [FLINK-24874] - Dropdown menu is not properly shown in UI [FLINK-24885] - ProcessElement Interface parameter Collector : java.lang.NullPointerException [FLINK-24919] - UnalignedCheckpointITCase hangs on Azure [FLINK-24922] - Fix spelling errors in the word &quot;parallism&quot; [FLINK-24937] - &quot;kubernetes application HA test&quot; hangs on azure [FLINK-24938] - Checkpoint cleaner is closed before checkpoints are discarded [FLINK-25022] - ClassLoader leak with ThreadLocals on the JM when submitting a job through the REST API [FLINK-25067] - Correct the description of RocksDB&#39;s background threads [FLINK-25084] - Field names must be unique. Found duplicates [FLINK-25091] - Official website document FileSink orc compression attribute reference error [FLINK-25096] - Issue in exceptions API(/jobs/:jobid/exceptions) in flink 1.13.2 [FLINK-25126] - FlinkKafkaInternalProducer state is not reset if transaction finalization fails [FLINK-25132] - KafkaSource cannot work with object-reusing DeserializationSchema [FLINK-25134] - Unused RetryRule in KafkaConsumerTestBase swallows retries [FLINK-25222] - Remove NetworkFailureProxy used for Kafka connector tests [FLINK-25271] - ApplicationDispatcherBootstrapITCase. testDispatcherRecoversAfterLosingAndRegainingLeadership failed on azure [FLINK-25294] - Incorrect cloudpickle import [FLINK-25375] - Update Log4j to 2.17.0 [FLINK-25418] - The dir_cache is specified in the flink task. 
When there is no network, you will still download the python third-party library [FLINK-25446] - Avoid sanity check on read bytes on DataInputStream#read(byte[]) [FLINK-25468] - Local recovery fails if local state storage and RocksDB working directory are not on the same volume [FLINK-25477] - The directory structure of the State Backends document is not standardized [FLINK-25513] - CoFlatMapFunction requires both two flat_maps to yield something Improvement [FLINK-20443] - ContinuousProcessingTimeTrigger doesn&#39;t fire at the end of the window [FLINK-21467] - Document possible recommended usage of Bounded{One/Multi}Input.endInput and emphasize that they could be called multiple times [FLINK-23519] - Aggregate State Backend Latency by State Level [FLINK-23798] - Avoid using reflection to get filter when partition filter is enabled [FLINK-23842] - Add log messages for reader registrations and split requests. [FLINK-23914] - Make connector testing framework more verbose on test failure [FLINK-24117] - Remove unHandledErrorListener in ZooKeeperLeaderElectionDriver and ZooKeeperLeaderRetrievalDriver [FLINK-24148] - Add bloom filter policy option in RocksDBConfiguredOptions [FLINK-24382] - RecordsOut metric for sinks is inaccurate [FLINK-24437] - Remove unhandled exception handler from CuratorFramework before closing it [FLINK-24460] - Rocksdb Iterator Error Handling Improvement [FLINK-24481] - Translate buffer debloat documenation to chinese [FLINK-24529] - flink sql job cannot use custom job name [FLINK-24631] - Avoiding directly use the labels as selector for deployment and service [FLINK-24670] - Restructure unaligned checkpoints documentation page to &quot;Checkpointing under back pressure&quot; [FLINK-24690] - Clarification of buffer size threshold calculation in BufferDebloater [FLINK-24695] - Update how to configure unaligned checkpoints in the documentation [FLINK-24739] - State requirements for Flink&#39;s application mode in the documentation [FLINK-24813] 
- Improve ImplicitTypeConversionITCase [FLINK-24880] - Error messages &quot;OverflowError: timeout value is too large&quot; shown when executing PyFlink jobs [FLINK-24958] - correct the example and link for temporal table function documentation [FLINK-24987] - Enhance ExternalizedCheckpointCleanup enum [FLINK-25092] - Implement artifact cacher for Bash based Elasticsearch test Technical Debt [FLINK-24367] - Add a fallback AkkaRpcSystemLoader for tests in the IDE [FLINK-24445] - Move RPC System packaging to package phase [FLINK-24455] - FallbackAkkaRpcSystemLoader should check for maven errors [FLINK-24513] - AkkaRpcSystemLoader must be an ITCase [FLINK-24559] - flink-rpc-akka-loader does not bundle flink-rpc-akka [FLINK-24609] - flink-rpc-akka uses wrong Scala version property for parser-combinators [FLINK-24859] - Document new File formats [FLINK-25472] - Update to Log4j 2.17.1 `}),e.add({id:119,href:"/2022/01/07/apache-flink-ml-2.0.0-release-announcement/",title:"Apache Flink ML 2.0.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink ML 2.0.0! Flink ML is a library that provides APIs and infrastructure for building stream-batch unified machine learning algorithms, that can be easy-to-use and performant with (near-) real-time latency.
+Release Notes - Flink - Version 1.14.3 Sub-task [FLINK-24355] - Expose the flag for enabling checkpoints after tasks finish in the Web UI Bug [FLINK-15987] - SELECT 1.0e0 / 0.0e0 throws NumberFormatException [FLINK-17914] - HistoryServer deletes cached archives if archive listing fails [FLINK-19142] - Local recovery can be broken if slot hijacking happened during a full restart [FLINK-20195] - Jobs endpoint returns duplicated jobs [FLINK-20370] - Result is wrong when sink primary key is not the same with query [FLINK-21289] - Application mode ignores the pipeline.classpaths configuration [FLINK-21345] - NullPointerException LogicalCorrelateToJoinFromTemporalTableFunctionRule.scala:157 [FLINK-22113] - UniqueKey constraint is lost with multiple sources join in SQL [FLINK-22954] - Don&#39;t support consuming update and delete changes when use table function that does not contain table field [FLINK-23614] - The resulting scale of TRUNCATE(DECIMAL, ...) is not correct [FLINK-23704] - FLIP-27 sources are not generating LatencyMarkers [FLINK-23827] - Fix ModifiedMonotonicity inference for some node [FLINK-23919] - PullUpWindowTableFunctionIntoWindowAggregateRule generates invalid Calc for Window TVF [FLINK-24156] - BlobServer crashes due to SocketTimeoutException in Java 11 [FLINK-24232] - Archiving of suspended jobs prevents breaks subsequent archive attempts [FLINK-24291] - Decimal precision is lost when deserializing in test cases [FLINK-24310] - A bug in the BufferingSink example in the doc [FLINK-24315] - Cannot rebuild watcher thread while the K8S API server is unavailable [FLINK-24318] - Casting a number to boolean has different results between &#39;select&#39; fields and &#39;where&#39; condition [FLINK-24331] - PartiallyFinishedSourcesITCase fails with &quot;No downstream received 0 from xxx;&quot; [FLINK-24336] - PyFlink TableEnvironment executes the SQL randomly MalformedURLException with the configuration for &#39;pipeline.classpaths&#39; [FLINK-24344] - 
Handling of IOExceptions when triggering checkpoints doesn&#39;t cause job failover [FLINK-24353] - Bash scripts do not respect dynamic configurations when calculating memory sizes [FLINK-24366] - Unnecessary/misleading error message about failing restores when tasks are already canceled. [FLINK-24371] - Support SinkWriter preCommit without the need of a committer [FLINK-24377] - TM resource may not be properly released after heartbeat timeout [FLINK-24380] - Flink should handle the state transition of the pod from Pending to Failed [FLINK-24381] - Table API exceptions may leak sensitive configuration values [FLINK-24401] - TM cannot exit after Metaspace OOM [FLINK-24407] - Pulsar connector chinese document link to Pulsar document location incorrectly. [FLINK-24408] - org.codehaus.janino.InternalCompilerException: Compiling &quot;StreamExecValues$200&quot;: Code of method &quot;nextRecord(Ljava/lang/Object;)Ljava/lang/Object;&quot; of class &quot;StreamExecValues$200&quot; grows beyond 64 KB [FLINK-24409] - Kafka topics with periods in their names generate a constant stream of errors [FLINK-24431] - [Kinesis][EFO] EAGER registration strategy does not work when job fails over [FLINK-24432] - RocksIteratorWrapper.seekToLast() calls the wrong RocksIterator method [FLINK-24465] - Wrong javadoc and documentation for buffer timeout [FLINK-24467] - Set min and max buffer size even if the difference less than threshold [FLINK-24468] - NPE when notifyNewBufferSize [FLINK-24469] - Incorrect calcualtion of the buffer size in case of channel data skew [FLINK-24480] - EqualiserCodeGeneratorTest fails on azure [FLINK-24488] - KafkaRecordSerializationSchemaBuilder does not forward timestamp [FLINK-24492] - incorrect implicit type conversion between numeric and (var)char [FLINK-24506] - checkpoint directory is not configurable through the Flink configuration passed into the StreamExecutionEnvironment [FLINK-24540] - Fix Resource leak due to Files.list [FLINK-24543] - Zookeeper 
connection issue causes inconsistent state in Flink [FLINK-24550] - Can not access job information from a standby jobmanager UI [FLINK-24551] - BUFFER_DEBLOAT_SAMPLES property is taken from the wrong configuration [FLINK-24552] - Ineffective buffer debloat configuration in randomized tests [FLINK-24563] - Comparing timstamp_ltz with random string throws NullPointerException [FLINK-24596] - Bugs in sink.buffer-flush before upsert-kafka [FLINK-24597] - RocksdbStateBackend getKeysAndNamespaces would return duplicate data when using MapState [FLINK-24600] - Duplicate 99th percentile displayed in checkpoint summary [FLINK-24608] - Sinks built with the unified sink framework do not receive timestamps when used in Table API [FLINK-24613] - Documentation on orc supported data types is outdated [FLINK-24647] - ClusterUncaughtExceptionHandler does not log the exception [FLINK-24654] - NPE on RetractableTopNFunction when some records were cleared by state ttl [FLINK-24662] - PyFlink sphinx check failed with &quot;node class &#39;meta&#39; is already registered, its visitors will be overridden&quot; [FLINK-24667] - Channel state writer would fail the task directly if meeting exception previously [FLINK-24676] - Schema does not match if explain insert statement with partial column [FLINK-24678] - Correct the metric name of map state contains latency [FLINK-24691] - FLINK SQL SUM() causes a precision error [FLINK-24704] - Exception occurs when the input record loses monotonicity on the sort key field of UpdatableTopNFunction [FLINK-24706] - AkkaInvocationHandler silently ignores deserialization errors [FLINK-24708] - \`ConvertToNotInOrInRule\` has a bug which leads to wrong result [FLINK-24728] - Batch SQL file sink forgets to close the output stream [FLINK-24733] - Data loss in pulsar source when using shared mode [FLINK-24738] - Fail during announcing buffer size to released local channel [FLINK-24761] - Fix PartitionPruner code gen compile fail [FLINK-24773] - KafkaCommitter 
should fail on unknown Exception [FLINK-24777] - Processed (persisted) in-flight data description miss on Monitoring Checkpointing page [FLINK-24789] - IllegalStateException with CheckpointCleaner being closed already [FLINK-24792] - OperatorCoordinatorSchedulerTest crashed JVM on AZP [FLINK-24835] - &quot;group by&quot; in the interval join will throw a exception [FLINK-24846] - AsyncWaitOperator fails during stop-with-savepoint [FLINK-24858] - TypeSerializer version mismatch during eagerly restore [FLINK-24874] - Dropdown menu is not properly shown in UI [FLINK-24885] - ProcessElement Interface parameter Collector : java.lang.NullPointerException [FLINK-24919] - UnalignedCheckpointITCase hangs on Azure [FLINK-24922] - Fix spelling errors in the word &quot;parallism&quot; [FLINK-24937] - &quot;kubernetes application HA test&quot; hangs on azure [FLINK-24938] - Checkpoint cleaner is closed before checkpoints are discarded [FLINK-25022] - ClassLoader leak with ThreadLocals on the JM when submitting a job through the REST API [FLINK-25067] - Correct the description of RocksDB&#39;s background threads [FLINK-25084] - Field names must be unique. Found duplicates [FLINK-25091] - Official website document FileSink orc compression attribute reference error [FLINK-25096] - Issue in exceptions API(/jobs/:jobid/exceptions) in flink 1.13.2 [FLINK-25126] - FlinkKafkaInternalProducer state is not reset if transaction finalization fails [FLINK-25132] - KafkaSource cannot work with object-reusing DeserializationSchema [FLINK-25134] - Unused RetryRule in KafkaConsumerTestBase swallows retries [FLINK-25222] - Remove NetworkFailureProxy used for Kafka connector tests [FLINK-25271] - ApplicationDispatcherBootstrapITCase. testDispatcherRecoversAfterLosingAndRegainingLeadership failed on azure [FLINK-25294] - Incorrect cloudpickle import [FLINK-25375] - Update Log4j to 2.17.0 [FLINK-25418] - The dir_cache is specified in the flink task. 
When there is no network, you will still download the python third-party library [FLINK-25446] - Avoid sanity check on read bytes on DataInputStream#read(byte[]) [FLINK-25468] - Local recovery fails if local state storage and RocksDB working directory are not on the same volume [FLINK-25477] - The directory structure of the State Backends document is not standardized [FLINK-25513] - CoFlatMapFunction requires both two flat_maps to yield something Improvement [FLINK-20443] - ContinuousProcessingTimeTrigger doesn&#39;t fire at the end of the window [FLINK-21467] - Document possible recommended usage of Bounded{One/Multi}Input.endInput and emphasize that they could be called multiple times [FLINK-23519] - Aggregate State Backend Latency by State Level [FLINK-23798] - Avoid using reflection to get filter when partition filter is enabled [FLINK-23842] - Add log messages for reader registrations and split requests. [FLINK-23914] - Make connector testing framework more verbose on test failure [FLINK-24117] - Remove unHandledErrorListener in ZooKeeperLeaderElectionDriver and ZooKeeperLeaderRetrievalDriver [FLINK-24148] - Add bloom filter policy option in RocksDBConfiguredOptions [FLINK-24382] - RecordsOut metric for sinks is inaccurate [FLINK-24437] - Remove unhandled exception handler from CuratorFramework before closing it [FLINK-24460] - Rocksdb Iterator Error Handling Improvement [FLINK-24481] - Translate buffer debloat documenation to chinese [FLINK-24529] - flink sql job cannot use custom job name [FLINK-24631] - Avoiding directly use the labels as selector for deployment and service [FLINK-24670] - Restructure unaligned checkpoints documentation page to &quot;Checkpointing under back pressure&quot; [FLINK-24690] - Clarification of buffer size threshold calculation in BufferDebloater [FLINK-24695] - Update how to configure unaligned checkpoints in the documentation [FLINK-24739] - State requirements for Flink&#39;s application mode in the documentation [FLINK-24813] 
- Improve ImplicitTypeConversionITCase [FLINK-24880] - Error messages &quot;OverflowError: timeout value is too large&quot; shown when executing PyFlink jobs [FLINK-24958] - correct the example and link for temporal table function documentation [FLINK-24987] - Enhance ExternalizedCheckpointCleanup enum [FLINK-25092] - Implement artifact cacher for Bash based Elasticsearch test Technical Debt [FLINK-24367] - Add a fallback AkkaRpcSystemLoader for tests in the IDE [FLINK-24445] - Move RPC System packaging to package phase [FLINK-24455] - FallbackAkkaRpcSystemLoader should check for maven errors [FLINK-24513] - AkkaRpcSystemLoader must be an ITCase [FLINK-24559] - flink-rpc-akka-loader does not bundle flink-rpc-akka [FLINK-24609] - flink-rpc-akka uses wrong Scala version property for parser-combinators [FLINK-24859] - Document new File formats [FLINK-25472] - Update to Log4j 2.17.1 `}),e.add({id:120,href:"/2022/01/07/apache-flink-ml-2.0.0-release-announcement/",title:"Apache Flink ML 2.0.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink ML 2.0.0! Flink ML is a library that provides APIs and infrastructure for building stream-batch unified machine learning algorithms, that can be easy-to-use and performant with (near-) real-time latency.
 This release involves a major refactor of the earlier Flink ML library and introduces major features that extend the Flink ML API and the iteration runtime, such as supporting stages with multi-input multi-output, graph-based stage composition, and a new stream-batch unified iteration library. Moreover, we added five algorithm implementations in this release, which is the start of a long-term initiative to provide a large number of off-the-shelf algorithms in Flink ML with state-of-the-art performance.
 We believe this release is an important step towards extending Apache Flink to a wide range of machine learning use cases, especially the real-time machine learning scenarios.
 We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA! We hope you like the new release and we’d be eager to learn about your experience with it.
@@ -2104,14 +2114,14 @@
 The binary distribution and source artifacts are now available on the updated Downloads page of the Flink website, and the most recent distribution of Flink ML Python package is available on PyPI.
 List of Contributors # The Apache Flink community would like to thank each one of the contributors that have made this release possible:
 Yun Gao, Dong Lin, Zhipeng Zhang, huangxingbo, Yunfeng Zhou, Jiangjie (Becket) Qin, weibo, abdelrahman-ik.
-`}),e.add({id:120,href:"/2022/01/04/how-we-improved-scheduler-performance-for-large-scale-jobs-part-one/",title:"How We Improved Scheduler Performance for Large-scale Jobs - Part One",section:"Flink Blog",content:` Introduction # When scheduling large-scale jobs in Flink 1.12, a lot of time is required to initialize jobs and deploy tasks. The scheduler also requires a large amount of heap memory in order to store the execution topology and host temporary deployment descriptors. For example, for a job with a topology that contains two vertices connected with an all-to-all edge and a parallelism of 10k (which means there are 10k source tasks and 10k sink tasks and every source task is connected to all sink tasks), Flink’s JobManager would require 30 GiB of heap memory and more than 4 minutes to deploy all of the tasks.
+`}),e.add({id:121,href:"/2022/01/04/how-we-improved-scheduler-performance-for-large-scale-jobs-part-one/",title:"How We Improved Scheduler Performance for Large-scale Jobs - Part One",section:"Flink Blog",content:` Introduction # When scheduling large-scale jobs in Flink 1.12, a lot of time is required to initialize jobs and deploy tasks. The scheduler also requires a large amount of heap memory in order to store the execution topology and host temporary deployment descriptors. For example, for a job with a topology that contains two vertices connected with an all-to-all edge and a parallelism of 10k (which means there are 10k source tasks and 10k sink tasks and every source task is connected to all sink tasks), Flink’s JobManager would require 30 GiB of heap memory and more than 4 minutes to deploy all of the tasks.
 Furthermore, task deployment may block the JobManager&rsquo;s main thread for a long time and the JobManager will not be able to respond to any other requests from TaskManagers. This could lead to heartbeat timeouts that trigger a failover. In the worst case, this will render the Flink cluster unusable because it cannot deploy the job.
 To improve the performance of the scheduler for large-scale jobs, we&rsquo;ve implemented several optimizations in Flink 1.13 and 1.14:
 Introduce the concept of consuming groups to optimize procedures related to the complexity of topologies, including the initialization, scheduling, failover, and partition release. This also reduces the memory required to store the topology; Introduce a cache to optimize task deployment, which makes the process faster and requires less memory; Leverage characteristics of the logical topology and the scheduling topology to speed up the building of pipelined regions. Benchmarking Results # To estimate the effect of our optimizations, we conducted several experiments to compare the performance of Flink 1.12 (before the optimization) with Flink 1.14 (after the optimization). The job in our experiments contains two vertices connected with an all-to-all edge. The parallelisms of these vertices are both 10K. To make temporary deployment descriptors distributed via the blob server, we set the configuration blob.offload.minsize to 100 KiB (from default value 1 MiB). This configuration means that the blobs larger than the set value will be distributed via the blob server, and the size of deployment descriptors in our test job is about 270 KiB. The results of our experiments are illustrated below:
 Table 1 - The comparison of time cost between Flink 1.12 and 1.14 Procedure 1.12 1.14 Reduction(%) Job Initialization 11,431ms 627ms 94.51% Task Deployment 63,118ms 17,183ms 72.78% Computing tasks to restart when failover 37,195ms 170ms 99.55% In addition to quicker speeds, the memory usage is significantly reduced. It requires 30 GiB heap memory for a JobManager to deploy the test job and keep it running stably with Flink 1.12, while the minimum heap memory required by the JobManager with Flink 1.14 is only 2 GiB. There are also less occurrences of long-term garbage collection. When running the test job with Flink 1.12, a garbage collection that lasts more than 10 seconds occurs during both job initialization and task deployment. With Flink 1.14, since there is no long-term garbage collection, there is also a decreased risk of heartbeat timeouts, which creates better cluster stability.
 In our experiment, it took more than 4 minutes for the large-scale job with Flink 1.12 to transition to running (excluding the time spent on allocating resources). With Flink 1.14, it took no more than 30 seconds (excluding the time spent on allocating resources). The time cost is reduced by 87%. Thus, for users who are running large-scale jobs for production and want better scheduling performance, please consider upgrading Flink to 1.14.
 In part two of this blog post, we are going to talk about these improvements in detail.
-`}),e.add({id:121,href:"/2022/01/04/how-we-improved-scheduler-performance-for-large-scale-jobs-part-two/",title:"How We Improved Scheduler Performance for Large-scale Jobs - Part Two",section:"Flink Blog",content:`Part one of this blog post briefly introduced the optimizations we’ve made to improve the performance of the scheduler; compared to Flink 1.12, the time cost and memory usage of scheduling large-scale jobs in Flink 1.14 is significantly reduced. In part two, we will elaborate on the details of these optimizations.
+`}),e.add({id:122,href:"/2022/01/04/how-we-improved-scheduler-performance-for-large-scale-jobs-part-two/",title:"How We Improved Scheduler Performance for Large-scale Jobs - Part Two",section:"Flink Blog",content:`Part one of this blog post briefly introduced the optimizations we’ve made to improve the performance of the scheduler; compared to Flink 1.12, the time cost and memory usage of scheduling large-scale jobs in Flink 1.14 is significantly reduced. In part two, we will elaborate on the details of these optimizations.
 Reducing complexity with groups # A distribution pattern describes how consumer tasks are connected to producer tasks. Currently, there are two distribution patterns in Flink: pointwise and all-to-all. When the distribution pattern is pointwise between two vertices, the computational complexity of traversing all edges is O(n). When the distribution pattern is all-to-all, the complexity of traversing all edges is O(n2), which means that complexity increases rapidly when the scale goes up.
 Fig. 1 - Two distribution patterns in Flink In Flink 1.12, the ExecutionEdge class is used to store the information of connections between tasks. This means that for the all-to-all distribution pattern, there would be O(n2) ExecutionEdges, which would take up a lot of memory for large-scale jobs. For two JobVertices connected with an all-to-all edge and a parallelism of 10K, it would take more than 4 GiB memory to store 100M ExecutionEdges. Since there can be multiple all-to-all connections between vertices in production jobs, the amount of memory required would increase rapidly.
 As we can see in Fig. 1, for two JobVertices connected with the all-to-all distribution pattern, all IntermediateResultPartitions produced by upstream ExecutionVertices are isomorphic, which means that the downstream ExecutionVertices they connect to are exactly the same. The downstream ExecutionVertices belonging to the same JobVertex are also isomorphic, as the upstream IntermediateResultPartitions they connect to are the same too. Since every JobEdge has exactly one distribution type, we can divide vertices and result partitions into groups according to the distribution type of the JobEdge.
@@ -2141,22 +2151,22 @@
 If there are only pointwise distribution patterns inside a region, Tarjan&rsquo;s strongly connected components algorithm is still used to ensure no cyclic dependencies. Since there are only pointwise distribution patterns, the number of edges in the topology is O(n), and the computational complexity of the algorithm will be O(n).
 Fig. 6 - How to convert a LogicalPipelinedRegion to ScheduledPipelinedRegions After the optimization, the overall computational complexity of building pipelined regions decreases from O(n2) to O(n). In our experiments, for the job which contains two vertices connected with a blocking all-to-all edge, when their parallelisms are both 10K, the time of building pipelined regions decreases by 99%, from 8,257 ms to 120 ms.
 Summary # All in all, we&rsquo;ve done several optimizations to improve the scheduler’s performance for large-scale jobs in Flink 1.13 and 1.14. The optimizations involve procedures including job initialization, scheduling, task deployment, and failover. If you have any questions about them, please feel free to start a discussion in the dev mail list.
-`}),e.add({id:122,href:"/2021/12/22/apache-flink-statefun-log4j-emergency-release/",title:"Apache Flink StateFun Log4j emergency release",section:"Flink Blog",content:`The Apache Flink community has released an emergency bugfix version of Apache Flink Stateful Function 3.1.1.
+`}),e.add({id:123,href:"/2021/12/22/apache-flink-statefun-log4j-emergency-release/",title:"Apache Flink StateFun Log4j emergency release",section:"Flink Blog",content:`The Apache Flink community has released an emergency bugfix version of Apache Flink Stateful Function 3.1.1.
 This release include a version upgrade of Apache Flink to 1.13.5, for log4j to address CVE-2021-44228 and CVE-2021-45046.
 We highly recommend all users to upgrade to the latest patch release.
 You can find the source and binaries on the updated Downloads page, and Docker images in the apache/flink-statefun dockerhub repository.
-`}),e.add({id:123,href:"/2021/12/16/apache-flink-log4j-emergency-releases/",title:"Apache Flink Log4j emergency releases",section:"Flink Blog",content:`The Apache Flink community has released emergency bugfix versions of Apache Flink for the 1.11, 1.12, 1.13 and 1.14 series.
+`}),e.add({id:124,href:"/2021/12/16/apache-flink-log4j-emergency-releases/",title:"Apache Flink Log4j emergency releases",section:"Flink Blog",content:`The Apache Flink community has released emergency bugfix versions of Apache Flink for the 1.11, 1.12, 1.13 and 1.14 series.
 These releases only include a version upgrade for Log4j to address CVE-2021-44228 and CVE-2021-45046.
 We highly recommend all users to upgrade to the respective patch release.
 You can find the source and binaries on the updated Downloads page, and Docker images in the apache/flink dockerhub repository.
 We are publishing this announcement earlier than usual to give users access to the updated source/binary releases as soon as possible. As a result of that certain artifacts are not yet available:
 Maven artifacts are currently being synced to Maven central and will become available over the next 24 hours. The 1.11.6/1.12.7 Python binaries will be published at a later date. This post will be continously updated to reflect the latest state.
 The newly released versions are: 1.14.2 1.13.5 1.12.7 1.11.6 To clarify and avoid confusion: The 1.14.1 / 1.13.4 / 1.12.6 / 1.11.5 releases, which were supposed to only contain a Log4j upgrade to 2.15.0, were skipped because CVE-2021-45046 was discovered during the release publication. Some artifacts were published to Maven Central, but no source/binary releases nor Docker images are available for those versions.
-`}),e.add({id:124,href:"/2021/12/10/advise-on-apache-log4j-zero-day-cve-2021-44228/",title:"Advise on Apache Log4j Zero Day (CVE-2021-44228)",section:"Flink Blog",content:` Please see [this](/news/2021/12/16/log4j-patch-releases) for our updated recommendation regarding this CVE. Yesterday, a new Zero Day for Apache Log4j was reported. It is by now tracked under CVE-2021-44228.
+`}),e.add({id:125,href:"/2021/12/10/advise-on-apache-log4j-zero-day-cve-2021-44228/",title:"Advise on Apache Log4j Zero Day (CVE-2021-44228)",section:"Flink Blog",content:` Please see [this](/news/2021/12/16/log4j-patch-releases) for our updated recommendation regarding this CVE. Yesterday, a new Zero Day for Apache Log4j was reported. It is by now tracked under CVE-2021-44228.
 Apache Flink is bundling a version of Log4j that is affected by this vulnerability. We recommend users to follow the advisory of the Apache Log4j Community. For Apache Flink this currently translates to setting the following property in your flink-conf.yaml:
 env.java.opts: -Dlog4j2.formatMsgNoLookups=true If you are already setting env.java.opts.jobmanager, env.java.opts.taskmanager, env.java.opts.client, or env.java.opts.historyserver you should instead add the system change to those existing parameter lists.
 As soon as Log4j has been upgraded to 2.15.0 in Apache Flink, this is not necessary anymore. This effort is tracked in FLINK-25240. It will be included in Flink 1.15.0, Flink 1.14.1 and Flink 1.13.3. We expect Flink 1.14.1 to be released in the next 1-2 weeks. The other releases will follow in their regular cadence.
-`}),e.add({id:125,href:"/2021/11/03/flink-backward-the-apache-flink-retrospective/",title:"Flink Backward - The Apache Flink Retrospective",section:"Flink Blog",content:`It has now been a month since the community released Apache Flink 1.14 into the wild. We had a comprehensive look at the enhancements, additions, and fixups in the release announcement blog post, and now we will look at the development cycle from a different angle. Based on feedback collected from contributors involved in this release, we will explore the experiences and processes behind it all.
+`}),e.add({id:126,href:"/2021/11/03/flink-backward-the-apache-flink-retrospective/",title:"Flink Backward - The Apache Flink Retrospective",section:"Flink Blog",content:`It has now been a month since the community released Apache Flink 1.14 into the wild. We had a comprehensive look at the enhancements, additions, and fixups in the release announcement blog post, and now we will look at the development cycle from a different angle. Based on feedback collected from contributors involved in this release, we will explore the experiences and processes behind it all.
 A retrospective on the release cycle # From the team, we collected emotions that have been attributed to points in time of the 1.14 release cycle:
 The overall sentiment seems to be quite good. A ship crushed a robot two times, someone felt sick towards the end, an octopus causing negative emotions appeared in June&hellip;
 We looked at the origin of these emotions and analyzed what went well and what could be improved. We also incorporated some feedback gathered from the community.
@@ -2181,7 +2191,7 @@
 :heart:
 An open source community is more than just working on software. Apache Flink is the perfect example of software that is collaborated on in all parts of the world. The active mailing list, the discussions on FLIPs, and the interactions on Jira tickets all document how people work together to build something great. We should never forget that.
 In the meantime, the community is already working towards Apache Flink 1.15. If you would like to become a contributor, please reach out via the dev mailing list. We are happy to help you find a ticket to get started on.
-`}),e.add({id:126,href:"/2021/10/26/sort-based-blocking-shuffle-implementation-in-flink-part-one/",title:"Sort-Based Blocking Shuffle Implementation in Flink - Part One",section:"Flink Blog",content:`Part one of this blog post will explain the motivation behind introducing sort-based blocking shuffle, present benchmark results, and provide guidelines on how to use this new feature.
+`}),e.add({id:127,href:"/2021/10/26/sort-based-blocking-shuffle-implementation-in-flink-part-one/",title:"Sort-Based Blocking Shuffle Implementation in Flink - Part One",section:"Flink Blog",content:`Part one of this blog post will explain the motivation behind introducing sort-based blocking shuffle, present benchmark results, and provide guidelines on how to use this new feature.
 How data gets passed around between operators # Data shuffling is an important stage in batch processing applications and describes how data is sent from one operator to the next. In this phase, output data of the upstream operator will spill over to persistent storages like disk, then the downstream operator will read the corresponding data and process it. Blocking shuffle means that intermediate results from operator A are not sent immediately to operator B until operator A has completely finished.
 The hash-based and sort-based blocking shuffle are two main blocking shuffle implementations widely adopted by existing distributed data processing frameworks:
 Hash-Based Approach: The core idea behind the hash-based approach is to write data consumed by different consumer tasks to different files and each file can then serve as a natural boundary for the partitioned data. Sort-Based Approach: The core idea behind the sort-based approach is to write all the produced data together first and then leverage sorting to cluster data belonging to different data partitions or even keys. The sort-based blocking shuffle was introduced in Flink 1.12 and further optimized and made production-ready in 1.13 for both stability and performance. We hope you enjoy the improvements and any feedback is highly appreciated.
@@ -2200,7 +2210,7 @@
 For more information about blocking shuffle in Flink, please refer to the official documentation.
 Note: From the optimization mechanism in part two, we can see that the IO scheduling relies on the concurrent data read requests of the downstream consumer tasks for more sequential reads. As a result, if the downstream consumer task is running one by one (for example, because of limited resources), the advantage brought by IO scheduling disappears, which can influence performance. We may further optimize this scenario in future versions.
 What&rsquo;s next? # For details on the design and implementation of this feature, please refer to the second part of this blog!
-`}),e.add({id:127,href:"/2021/10/26/sort-based-blocking-shuffle-implementation-in-flink-part-two/",title:"Sort-Based Blocking Shuffle Implementation in Flink - Part Two",section:"Flink Blog",content:`Part one of this blog post explained the motivation behind introducing sort-based blocking shuffle, presented benchmark results, and provided guidelines on how to use this new feature.
+`}),e.add({id:128,href:"/2021/10/26/sort-based-blocking-shuffle-implementation-in-flink-part-two/",title:"Sort-Based Blocking Shuffle Implementation in Flink - Part Two",section:"Flink Blog",content:`Part one of this blog post explained the motivation behind introducing sort-based blocking shuffle, presented benchmark results, and provided guidelines on how to use this new feature.
 Like sort-merge shuffle implemented by other distributed data processing frameworks, the whole sort-based shuffle process in Flink consists of several important stages, including collecting data in memory, sorting the collected data in memory, spilling the sorted data to files, and reading the shuffle data from these spilled files. However, Flink’s implementation has some core differences, including the multiple data region file structure, the removal of file merge, and IO scheduling.
 In part two of this blog post, we will give you insight into some core design considerations and implementation details of the sort-based blocking shuffle in Flink and list several ideas for future improvement.
 Design considerations # There are several core objectives we want to achieve for the new sort-based blocking shuffle to be implemented Flink:
@@ -2228,7 +2238,7 @@
 Multi-Disks Load Balance: Multi-Disks Load Balance: In production environments, there are usually multiple disks per node, better load balance can lead to better performance, the relevant issues are FLINK-21790 and FLINK-21789.
 External/Remote Shuffle Service: Implementing an external/remote shuffle service can further improve the shuffle io performance because as a centralized service, it can collect more information leading to more optimized decisions. For example, further merging of data to the same downstream task, better node-level load balance, handling of stragglers, shared resources and so on. There are several relevant issues: FLINK-13247, FLINK-22672, FLINK-19551 and FLINK-10653.
 Enable the Choice of SSD/HDD: In production environments, there are usually both SSD and HDD storage. Some jobs may prefer SSD for the faster speed, some jobs may prefer HDD for larger space and cheaper price. Enabling the choice of SSD/HDD can improve the usability of Flink’s blocking shuffle.
-`}),e.add({id:128,href:"/2021/10/19/apache-flink-1.13.3-released/",title:"Apache Flink 1.13.3 Released",section:"Flink Blog",content:`The Apache Flink community released the third bugfix version of the Apache Flink 1.13 series.
+`}),e.add({id:129,href:"/2021/10/19/apache-flink-1.13.3-released/",title:"Apache Flink 1.13.3 Released",section:"Flink Blog",content:`The Apache Flink community released the third bugfix version of the Apache Flink 1.13 series.
 This release includes 136 fixes and minor improvements for Flink 1.13.2. The list below includes bugfixes and improvements. For a complete list of all changes see: JIRA.
 We highly recommend all users to upgrade to Flink 1.13.3.
 Updated Maven dependencies:
@@ -2239,7 +2249,7 @@
 Clarify SourceFunction#cancel() contract about interrupting (FLINK-23527) # The contract of the SourceFunction#cancel() method with respect to interruptions has been clarified:
 The source itself shouldn&rsquo;t interrupt the source thread. The source can expect to not be interrupted during a clean cancellation procedure. taskmanager.slot.timeout falls back to akka.ask.timeout (FLINK-22002) # The config option taskmanager.slot.timeout falls now back to akka.ask.timeout if no value has been configured.
 Increase akka.ask.timeout for tests using the MiniCluster (FLINK-23906) # The default akka.ask.timeout used by the MiniCluster has been increased to 5 minutes. If you want to use a smaller value, then you have to set it explicitly in the passed configuration. The change is due to the fact that messages cannot get lost in a single-process minicluster, so this timeout (which otherwise helps to detect message loss in distributed setups) has no benefit here. The increased timeout reduces the number of false-positive timeouts, for example during heavy tests on loaded CI/CD workers or during debugging.
-`}),e.add({id:129,href:"/2021/09/29/apache-flink-1.14.0-release-announcement/",title:"Apache Flink 1.14.0 Release Announcement",section:"Flink Blog",content:`The Apache Software Foundation recently released its annual report and Apache Flink once again made it on the list of the top 5 most active projects! This remarkable activity also shows in the new 1.14.0 release. Once again, more than 200 contributors worked on over 1,000 issues. We are proud of how this community is consistently moving the project forward.
+`}),e.add({id:130,href:"/2021/09/29/apache-flink-1.14.0-release-announcement/",title:"Apache Flink 1.14.0 Release Announcement",section:"Flink Blog",content:`The Apache Software Foundation recently released its annual report and Apache Flink once again made it on the list of the top 5 most active projects! This remarkable activity also shows in the new 1.14.0 release. Once again, more than 200 contributors worked on over 1,000 issues. We are proud of how this community is consistently moving the project forward.
 This release brings many new features and improvements in areas such as the SQL API, more connector support, checkpointing, and PyFlink. A major area of changes in this release is the integrated streaming &amp; batch experience. We believe that, in practice, unbounded stream processing goes hand-in-hand with bounded- and batch processing tasks, because many use cases require processing historic data from various sources alongside streaming data. Examples are data exploration when developing new applications, bootstrapping state for new applications, training models to be applied in a streaming application, or re-processing data after fixes/upgrades.
 In Flink 1.14, we finally made it possible to mix bounded and unbounded streams in an application: Flink now supports taking checkpoints of applications that are partially running and partially finished (some operators reached the end of the bounded inputs). Additionally, bounded streams now take a final checkpoint when reaching their end to ensure smooth committing of results in sinks.
 The batch execution mode now supports programs that use a mixture of the DataStream API and the SQL/Table API (previously only pure Table/SQL or DataStream programs).
@@ -2293,7 +2303,7 @@
 Upgrade Notes # While we aim to make upgrades as smooth as possible, some of the changes require users to adjust some parts of the program when upgrading to Apache Flink 1.14. Please take a look at the release notes for a list of adjustments to make and issues to check during upgrades.
 List of Contributors # The Apache Flink community would like to thank each one of the contributors that have made this release possible:
 adavis9592, Ada Wong, aidenma, Aitozi, Ankush Khanna, anton, Anton Kalashnikov, Arvid Heise, Ashwin Kolhatkar, Authuir, bgeng777, Brian Zhou, camile.sing, caoyingjie, Cemre Mengu, chennuo, Chesnay Schepler, chuixue, CodeCooker17, comsir, Daisy T, Danny Cranmer, David Anderson, David Moravek, Dawid Wysakowicz, dbgp2021, Dian Fu, Dong Lin, Edmondsky, Elphas Toringepi, Emre Kartoglu, ericliuk, Eron Wright, est08zw, Etienne Chauchot, Fabian Paul, fangliang, fangyue1, fengli, Francesco Guardiani, FuyaoLi2017, fuyli, Gabor Somogyi, gaoyajun02, Gen Luo, gentlewangyu, GitHub, godfrey he, godfreyhe, gongzhongqiang, Guokuai Huang, GuoWei Ma, Gyula Fora, hackergin, hameizi, Hang Ruan, Han Wei, hapihu, hehuiyuan, hstdream, Huachao Mao, HuangXiao, huangxingbo, huxixiang, Ingo Bürk, Jacklee, Jan Brusch, Jane, Jane Chan, Jark Wu, JasonLee, Jiajie Zhong, Jiangjie (Becket) Qin, Jianzhang Chen, Jiayi Liao, Jing, Jingsong Lee, JingsongLi, Jing Zhang, jinxing64, junfan.zhang, Jun Qin, Jun Zhang, kanata163, Kevin Bohinski, kevin.cyj, Kevin Fan, Kurt Young, kylewang, Lars Bachmann, lbb, LB Yu, LB-Yu, LeeJiangchuan, Leeviiii, leiyanfei, Leonard Xu, LightGHLi, Lijie Wang, liliwei, lincoln lee, Linyu, liuyanpunk, lixiaobao14, luoyuxia, Lyn Zhang, lys0716, MaChengLong, mans2singh, Marios Trivyzas, martijnvisser, Matthias Pohl, Mayi, mayue.fight, Michael Li, Michal Ciesielczyk, Mika, Mika Naylor, MikuSugar, movesan, Mulan, Nico Kruber, Nicolas Raga, Nicolaus Weidner, paul8263, Paul Lin, pierre xiong, Piotr Nowojski, Qingsheng Ren, Rainie Li, Robert Metzger, Roc Marshal, Roman, Roman Khachatryan, Rui Li, sammieliu, sasukerui, Senbin Lin, Senhong Liu, Serhat Soydan, Seth Wiesman, sharkdtu, Shengkai, Shen Zhu, shizhengchao, Shuo Cheng, shuo.cs, simenliuxing, sjwiesman, Srinivasulu Punuru, Stefan Gloutnikov, SteNicholas, Stephan Ewen, sujun, sv3ndk, Svend Vanderveken, syhily, Tartarus0zm, Terry Wang, Thesharing, Thomas Weise, tiegen, Till Rohrmann, Timo Walther, tison, Tony Wei, trushev, 
tsreaper, TsReaper, Tzu-Li (Gordon) Tai, wangfeifan, wangwei1025, wangxianghu, wangyang0918, weizheng92, Wenhao Ji, Wenlong Lyu, wenqiao, WilliamSong11, wuren, wysstartgo, Xintong Song, yanchenyun, yangminghua, yangqu, Yang Wang, Yangyang ZHANG, Yangze Guo, Yao Zhang, yfhanfei, yiksanchan, Yik San Chan, Yi Tang, yljee, Youngwoo Kim, Yuan Mei, Yubin Li, Yufan Sheng, yulei0824, Yun Gao, Yun Tang, yuxia Luo, Zakelly, zhang chaoming, zhangjunfan, zhangmang, zhangzhengqi3, zhao_wei_nan, zhaown, zhaoxing, ZhiJie Yang, Zhilong Hong, Zhiwen Sun, Zhu Zhu, zlzhang0122, zoran, Zor X. LIU, zoucao, Zsombor Chikan, 子扬, 莫辞
-`}),e.add({id:130,href:"/2021/09/07/implementing-a-custom-source-connector-for-table-api-and-sql-part-one/",title:"Implementing a Custom Source Connector for Table API and SQL - Part One ",section:"Flink Blog",content:`Part one of this tutorial will teach you how to build and run a custom source connector to be used with Table API and SQL, two high-level abstractions in Flink. The tutorial comes with a bundled docker-compose setup that lets you easily run the connector. You can then try it out with Flink’s SQL client.
+`}),e.add({id:131,href:"/2021/09/07/implementing-a-custom-source-connector-for-table-api-and-sql-part-one/",title:"Implementing a Custom Source Connector for Table API and SQL - Part One ",section:"Flink Blog",content:`Part one of this tutorial will teach you how to build and run a custom source connector to be used with Table API and SQL, two high-level abstractions in Flink. The tutorial comes with a bundled docker-compose setup that lets you easily run the connector. You can then try it out with Flink’s SQL client.
 Introduction # Apache Flink is a data processing engine that aims to keep state locally in order to do computations efficiently. However, Flink does not &ldquo;own&rdquo; the data but relies on external systems to ingest and persist data. Connecting to external data input (sources) and external data storage (sinks) is usually summarized under the term connectors in Flink.
 Since connectors are such important components, Flink ships with connectors for some popular systems. But sometimes you may need to read in an uncommon data format and what Flink provides is not enough. This is why Flink also provides extension points for building custom connectors if you want to connect to a system that is not supported by an existing connector.
 Once you have a source and a sink defined for Flink, you can use its declarative APIs (in the form of the Table API and SQL) to execute queries for data analysis.
@@ -2335,7 +2345,7 @@
 Now that you have a working connector, the next step is to make it do something more useful than returning static data.
 Summary # In this tutorial, you looked into the infrastructure required for a connector and configured its runtime implementation to define how it should be executed in a cluster. You also defined a dynamic table source that reads the entire stream-converted table from the external source, made the connector discoverable by Flink through creating a factory class for it, and then tested it.
 Next Steps # In part two, you will integrate this connector with an email inbox through the IMAP protocol.
-`}),e.add({id:131,href:"/2021/09/07/implementing-a-custom-source-connector-for-table-api-and-sql-part-two/",title:"Implementing a custom source connector for Table API and SQL - Part Two ",section:"Flink Blog",content:`In part one of this tutorial, you learned how to build a custom source connector for Flink. In part two, you will learn how to integrate the connector with a test email inbox through the IMAP protocol and filter out emails using Flink SQL.
+`}),e.add({id:132,href:"/2021/09/07/implementing-a-custom-source-connector-for-table-api-and-sql-part-two/",title:"Implementing a custom source connector for Table API and SQL - Part Two ",section:"Flink Blog",content:`In part one of this tutorial, you learned how to build a custom source connector for Flink. In part two, you will learn how to integrate the connector with a test email inbox through the IMAP protocol and filter out emails using Flink SQL.
 Goals # Part two of the tutorial will teach you how to:
 integrate a source connector which connects to a mailbox using the IMAP protocol use Jakarta Mail, a Java library that can send and receive email via the IMAP protocol write Flink SQL and execute the queries in the Ververica Platform for a nicer visualization You are encouraged to follow along with the code in this repository. It provides a boilerplate project that also comes with a bundled docker-compose setup that lets you easily run the connector. You can then try it out with Flink’s SQL client.
 Prerequisites # This tutorial assumes that you have:
@@ -2374,7 +2384,7 @@
 Ververica Platform - SQL Editor
 Summary # Apache Flink is designed for easy extensibility and allows users to access many different external systems as data sources or sinks through a versatile set of connectors. It can read and write data from databases, local and distributed file systems.
 Flink also exposes APIs on top of which custom connectors can be built. In this two-part blog series, you explored some of these APIs and concepts and learned how to implement your own custom source connector that can read in data from an email inbox. You then used Flink to process incoming emails through the IMAP protocol and wrote some Flink SQL.
-`}),e.add({id:132,href:"/2021/08/31/stateful-functions-3.1.0-release-announcement/",title:"Stateful Functions 3.1.0 Release Announcement",section:"Flink Blog",content:`Stateful Functions is a cross-platform stack for building Stateful Serverless applications, making it radically simpler to develop scalable, consistent, and elastic distributed applications. This new release brings various improvements to the StateFun runtime, a leaner way to specify StateFun module components, and a brand new GoLang SDK!
+`}),e.add({id:133,href:"/2021/08/31/stateful-functions-3.1.0-release-announcement/",title:"Stateful Functions 3.1.0 Release Announcement",section:"Flink Blog",content:`Stateful Functions is a cross-platform stack for building Stateful Serverless applications, making it radically simpler to develop scalable, consistent, and elastic distributed applications. This new release brings various improvements to the StateFun runtime, a leaner way to specify StateFun module components, and a brand new GoLang SDK!
 The binary distribution and source artifacts are now available on the updated Downloads page of the Flink website, and the most recent Java SDK, Python SDK, and GoLang SDK distributions are available on Maven, PyPI, and Github repecitvely. You can also find official StateFun Docker images of the new version on Dockerhub.
 For more details, check the complete release changelog and the updated documentation. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA!
 New Features # Delayed Message Cancellation # Stateful Functions communicate by sending messages, but sometimes it is helpful that a function will send a message for itself. For example, you may want to set a time limit on a customer onboarding flow to complete. This can easily be implmented by sending a message with a delay. But up until now, there was no way to indicate to the StateFun runtime that a particular delayed message is not necessary anymore (a customer had completed their onboarding flow). With StateFun 3.1, it is now possible to cancel a delayed message.
@@ -2396,30 +2406,30 @@
 GoLang SDK Showcase GoLang Greeter GoLang SDK Documentation Release Notes # Release Notes # Please review the release notes for a detailed list of changes and new features if you plan to upgrade your setup to Stateful Functions 3.1.0.
 List of Contributors # Evans Ye, George Birbilis, Igal Shilman, Konstantin Knauf, Seth Wiesman, Siddique Ahmad, Tzu-Li (Gordon) Tai, ariskk, austin ce
 If you’d like to get involved, we’re always looking for new contributors.
-`}),e.add({id:133,href:"/2021/08/31/help-us-stabilize-apache-flink-1.14.0-rc0/",title:"Help us stabilize Apache Flink 1.14.0 RC0",section:"Flink Blog",content:` Hint Update 29th of September: Today Apache Flink 1.14 has been released. For sure we'd still like to hear your feedback. Dear Flink Community,
+`}),e.add({id:134,href:"/2021/08/31/help-us-stabilize-apache-flink-1.14.0-rc0/",title:"Help us stabilize Apache Flink 1.14.0 RC0",section:"Flink Blog",content:` Hint Update 29th of September: Today Apache Flink 1.14 has been released. For sure we'd still like to hear your feedback. Dear Flink Community,
 we are excited to announce the first release candidate of Apache Flink 1.14. 🎉
 A lot of features and fixes went into this release, including improvements to the unified batch and streaming experience, an increase in fault tolerance by reducing in-flight data, and more developments on connectors and components. It wouldn&rsquo;t have been possible without your help. Around 211 people have made contributions!
 Two weeks ago (August 16th) we created a feature freeze. This means that only a few small, almost-ready features will go into the release from this moment on. We are now in the process of stabilizing the release and need your help! As you can see on the 1.14 release coordination page, a lot of focus is on documentation and testing.
 If you would like to contribute to the squirrel community, a great way would be to download the release candidate and test it. You can run some existing Flink jobs or pick one of the test issues. We would greatly appreciate any feedback you can provide on the JIRA tickets or on the mailing list.
 We continue to be grateful and inspired by the community who believe in the project and want to help create a great user experience and product for all Flink users.
 Many thanks!
-`}),e.add({id:134,href:"/2021/08/09/apache-flink-1.11.4-released/",title:"Apache Flink 1.11.4 Released",section:"Flink Blog",content:`The Apache Flink community released the next bugfix version of the Apache Flink 1.11 series.
+`}),e.add({id:135,href:"/2021/08/09/apache-flink-1.11.4-released/",title:"Apache Flink 1.11.4 Released",section:"Flink Blog",content:`The Apache Flink community released the next bugfix version of the Apache Flink 1.11 series.
 This release includes 78 fixes and minor improvements for Flink 1.11.4. The list below includes a detailed list of all fixes and improvements.
 We highly recommend all users to upgrade to Flink 1.11.4.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.11.4&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.11.4&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.11.4&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
-Release Notes - Flink - Version 1.11.4 Sub-task [FLINK-21070] - Overloaded aggregate functions cause converter errors [FLINK-21486] - Add sanity check when switching from Rocks to Heap timers Bug [FLINK-15262] - kafka connector doesn&#39;t read from beginning immediately when &#39;connector.startup-mode&#39; = &#39;earliest-offset&#39; [FLINK-16443] - Fix wrong fix for user-code CheckpointExceptions [FLINK-18438] - TaskManager start failed [FLINK-19369] - BlobClientTest.testGetFailsDuringStreamingForJobPermanentBlob hangs [FLINK-19436] - TPC-DS end-to-end test (Blink planner) failed during shutdown [FLINK-19771] - NullPointerException when accessing null array from postgres in JDBC Connector [FLINK-20288] - Correct documentation about savepoint self-contained [FLINK-20383] - DataSet allround end-to-end test fails with NullPointerException [FLINK-20626] - Canceling a job when it is failing will result in job hanging in CANCELING state [FLINK-20666] - Fix the deserialized Row losing the field_name information in PyFlink [FLINK-20675] - Asynchronous checkpoint failure would not fail the job anymore [FLINK-20680] - Fails to call var-arg function with no parameters [FLINK-20752] - FailureRateRestartBackoffTimeStrategy allows one less restart than configured [FLINK-20793] - Fix NamesTest due to code style refactor [FLINK-20803] - Version mismatch between spotless-maven-plugin and google-java-format plugin [FLINK-20832] - Deliver bootstrap resouces ourselves for website and documentation [FLINK-20841] - Fix compile error due to duplicated generated files [FLINK-20913] - Improve new HiveConf(jobConf, HiveConf.class) [FLINK-20989] - Functions in ExplodeFunctionUtil should handle null data to avoid NPE [FLINK-21008] - Residual HA related Kubernetes ConfigMaps and ZooKeeper nodes when cluster entrypoint received SIGTERM in shutdown [FLINK-21009] - Can not disable certain options in Elasticsearch 7 connector [FLINK-21013] - Blink planner does not ingest timestamp into 
StreamRecord [FLINK-21028] - Streaming application didn&#39;t stop properly [FLINK-21030] - Broken job restart for job with disjoint graph [FLINK-21071] - Snapshot branches running against flink-docker dev-master branch [FLINK-21132] - BoundedOneInput.endInput is called when taking synchronous savepoint [FLINK-21138] - KvStateServerHandler is not invoked with user code classloader [FLINK-21148] - YARNSessionFIFOSecuredITCase cannot connect to BlobServer [FLINK-21208] - pyarrow exception when using window with pandas udaf [FLINK-21213] - e2e test fail with &#39;As task is already not running, no longer decline checkpoint&#39; [FLINK-21215] - Checkpoint was declined because one input stream is finished [FLINK-21216] - StreamPandasConversionTests Fails [FLINK-21274] - At per-job mode, during the exit of the JobManager process, if ioExecutor exits at the end, the System.exit() method will not be executed. [FLINK-21289] - Application mode ignores the pipeline.classpaths configuration [FLINK-21312] - SavepointITCase.testStopSavepointWithBoundedInputConcurrently is unstable [FLINK-21323] - Stop-with-savepoint is not supported by SourceOperatorStreamTask [FLINK-21453] - BoundedOneInput.endInput is NOT called when doing stop with savepoint WITH drain [FLINK-21497] - JobLeaderIdService completes leader future despite no leader being elected [FLINK-21550] - ZooKeeperHaServicesTest.testSimpleClose fail [FLINK-21606] - TaskManager connected to invalid JobManager leading to TaskSubmissionException [FLINK-21609] - SimpleRecoveryITCaseBase.testRestartMultipleTimes fails on azure [FLINK-21654] - YARNSessionCapacitySchedulerITCase.testStartYarnSessionClusterInQaTeamQueue fail because of NullPointerException [FLINK-21725] - DataTypeExtractor extracts wrong fields ordering for Tuple12 [FLINK-21753] - Cycle references between memory manager and gc cleaner action [FLINK-21980] - ZooKeeperRunningJobsRegistry creates an empty znode [FLINK-21986] - taskmanager native memory not release 
timely after restart [FLINK-22081] - Entropy key not resolved if flink-s3-fs-hadoop is added as a plugin [FLINK-22109] - Misleading exception message if the number of arguments of a nested function is incorrect [FLINK-22184] - Rest client shutdown on failure runs in netty thread [FLINK-22424] - Writing to already released buffers potentially causing data corruption during job failover/cancellation [FLINK-22489] - subtask backpressure indicator shows value for entire job [FLINK-22597] - JobMaster cannot be restarted [FLINK-22815] - Disable unaligned checkpoints for broadcast partitioning [FLINK-22946] - Network buffer deadlock introduced by unaligned checkpoint [FLINK-23164] - JobMasterTest.testMultipleStartsWork unstable on azure [FLINK-23166] - ZipUtils doesn&#39;t handle properly for softlinks inside the zip file Improvement [FLINK-9844] - PackagedProgram does not close URLClassLoader [FLINK-18182] - Upgrade AWS SDK in flink-connector-kinesis to include new region af-south-1 [FLINK-19415] - Move Hive document to &quot;Table &amp; SQL Connectors&quot; from &quot;Table API &amp; SQL&quot; [FLINK-20651] - Use Spotless/google-java-format for code formatting/enforcement [FLINK-20770] - Incorrect description for config option kubernetes.rest-service.exposed.type [FLINK-20790] - Generated classes should not be put under src/ directory [FLINK-20792] - Allow shorthand invocation of spotless [FLINK-20805] - Blink runtime classes partially ignored by spotless [FLINK-20866] - Add how to list jobs in Yarn deployment documentation when HA enabled [FLINK-20906] - Update copyright year to 2021 for NOTICE files [FLINK-21020] - Bump Jackson to 2.10.5[.1] / 2.12.1 [FLINK-21123] - Upgrade Beanutils 1.9.x to 1.9.4 [FLINK-21164] - Jar handlers don&#39;t cleanup temporarily extracted jars [FLINK-21210] - ApplicationClusterEntryPoints should explicitly close PackagedProgram [FLINK-21411] - The components on which Flink depends may contain vulnerabilities. If yes, fix them. 
[FLINK-21735] - Harden JobMaster#updateTaskExecutionState() [FLINK-22142] - Remove console logging for Kafka connector for AZP runs [FLINK-22208] - Bump snappy-java to 1.1.5+ [FLINK-22470] - The root cause of the exception encountered during compiling the job was not exposed to users in certain cases [FLINK-23312] - Use -Dfast for building e2e tests on AZP `}),e.add({id:135,href:"/2021/08/06/apache-flink-1.12.5-released/",title:"Apache Flink 1.12.5 Released",section:"Flink Blog",content:`The Apache Flink community released the next bugfix version of the Apache Flink 1.12 series.
+Release Notes - Flink - Version 1.11.4 Sub-task [FLINK-21070] - Overloaded aggregate functions cause converter errors [FLINK-21486] - Add sanity check when switching from Rocks to Heap timers Bug [FLINK-15262] - kafka connector doesn&#39;t read from beginning immediately when &#39;connector.startup-mode&#39; = &#39;earliest-offset&#39; [FLINK-16443] - Fix wrong fix for user-code CheckpointExceptions [FLINK-18438] - TaskManager start failed [FLINK-19369] - BlobClientTest.testGetFailsDuringStreamingForJobPermanentBlob hangs [FLINK-19436] - TPC-DS end-to-end test (Blink planner) failed during shutdown [FLINK-19771] - NullPointerException when accessing null array from postgres in JDBC Connector [FLINK-20288] - Correct documentation about savepoint self-contained [FLINK-20383] - DataSet allround end-to-end test fails with NullPointerException [FLINK-20626] - Canceling a job when it is failing will result in job hanging in CANCELING state [FLINK-20666] - Fix the deserialized Row losing the field_name information in PyFlink [FLINK-20675] - Asynchronous checkpoint failure would not fail the job anymore [FLINK-20680] - Fails to call var-arg function with no parameters [FLINK-20752] - FailureRateRestartBackoffTimeStrategy allows one less restart than configured [FLINK-20793] - Fix NamesTest due to code style refactor [FLINK-20803] - Version mismatch between spotless-maven-plugin and google-java-format plugin [FLINK-20832] - Deliver bootstrap resouces ourselves for website and documentation [FLINK-20841] - Fix compile error due to duplicated generated files [FLINK-20913] - Improve new HiveConf(jobConf, HiveConf.class) [FLINK-20989] - Functions in ExplodeFunctionUtil should handle null data to avoid NPE [FLINK-21008] - Residual HA related Kubernetes ConfigMaps and ZooKeeper nodes when cluster entrypoint received SIGTERM in shutdown [FLINK-21009] - Can not disable certain options in Elasticsearch 7 connector [FLINK-21013] - Blink planner does not ingest timestamp into 
StreamRecord [FLINK-21028] - Streaming application didn&#39;t stop properly [FLINK-21030] - Broken job restart for job with disjoint graph [FLINK-21071] - Snapshot branches running against flink-docker dev-master branch [FLINK-21132] - BoundedOneInput.endInput is called when taking synchronous savepoint [FLINK-21138] - KvStateServerHandler is not invoked with user code classloader [FLINK-21148] - YARNSessionFIFOSecuredITCase cannot connect to BlobServer [FLINK-21208] - pyarrow exception when using window with pandas udaf [FLINK-21213] - e2e test fail with &#39;As task is already not running, no longer decline checkpoint&#39; [FLINK-21215] - Checkpoint was declined because one input stream is finished [FLINK-21216] - StreamPandasConversionTests Fails [FLINK-21274] - At per-job mode, during the exit of the JobManager process, if ioExecutor exits at the end, the System.exit() method will not be executed. [FLINK-21289] - Application mode ignores the pipeline.classpaths configuration [FLINK-21312] - SavepointITCase.testStopSavepointWithBoundedInputConcurrently is unstable [FLINK-21323] - Stop-with-savepoint is not supported by SourceOperatorStreamTask [FLINK-21453] - BoundedOneInput.endInput is NOT called when doing stop with savepoint WITH drain [FLINK-21497] - JobLeaderIdService completes leader future despite no leader being elected [FLINK-21550] - ZooKeeperHaServicesTest.testSimpleClose fail [FLINK-21606] - TaskManager connected to invalid JobManager leading to TaskSubmissionException [FLINK-21609] - SimpleRecoveryITCaseBase.testRestartMultipleTimes fails on azure [FLINK-21654] - YARNSessionCapacitySchedulerITCase.testStartYarnSessionClusterInQaTeamQueue fail because of NullPointerException [FLINK-21725] - DataTypeExtractor extracts wrong fields ordering for Tuple12 [FLINK-21753] - Cycle references between memory manager and gc cleaner action [FLINK-21980] - ZooKeeperRunningJobsRegistry creates an empty znode [FLINK-21986] - taskmanager native memory not release 
timely after restart [FLINK-22081] - Entropy key not resolved if flink-s3-fs-hadoop is added as a plugin [FLINK-22109] - Misleading exception message if the number of arguments of a nested function is incorrect [FLINK-22184] - Rest client shutdown on failure runs in netty thread [FLINK-22424] - Writing to already released buffers potentially causing data corruption during job failover/cancellation [FLINK-22489] - subtask backpressure indicator shows value for entire job [FLINK-22597] - JobMaster cannot be restarted [FLINK-22815] - Disable unaligned checkpoints for broadcast partitioning [FLINK-22946] - Network buffer deadlock introduced by unaligned checkpoint [FLINK-23164] - JobMasterTest.testMultipleStartsWork unstable on azure [FLINK-23166] - ZipUtils doesn&#39;t handle properly for softlinks inside the zip file Improvement [FLINK-9844] - PackagedProgram does not close URLClassLoader [FLINK-18182] - Upgrade AWS SDK in flink-connector-kinesis to include new region af-south-1 [FLINK-19415] - Move Hive document to &quot;Table &amp; SQL Connectors&quot; from &quot;Table API &amp; SQL&quot; [FLINK-20651] - Use Spotless/google-java-format for code formatting/enforcement [FLINK-20770] - Incorrect description for config option kubernetes.rest-service.exposed.type [FLINK-20790] - Generated classes should not be put under src/ directory [FLINK-20792] - Allow shorthand invocation of spotless [FLINK-20805] - Blink runtime classes partially ignored by spotless [FLINK-20866] - Add how to list jobs in Yarn deployment documentation when HA enabled [FLINK-20906] - Update copyright year to 2021 for NOTICE files [FLINK-21020] - Bump Jackson to 2.10.5[.1] / 2.12.1 [FLINK-21123] - Upgrade Beanutils 1.9.x to 1.9.4 [FLINK-21164] - Jar handlers don&#39;t cleanup temporarily extracted jars [FLINK-21210] - ApplicationClusterEntryPoints should explicitly close PackagedProgram [FLINK-21411] - The components on which Flink depends may contain vulnerabilities. If yes, fix them. 
[FLINK-21735] - Harden JobMaster#updateTaskExecutionState() [FLINK-22142] - Remove console logging for Kafka connector for AZP runs [FLINK-22208] - Bump snappy-java to 1.1.5+ [FLINK-22470] - The root cause of the exception encountered during compiling the job was not exposed to users in certain cases [FLINK-23312] - Use -Dfast for building e2e tests on AZP `}),e.add({id:136,href:"/2021/08/06/apache-flink-1.12.5-released/",title:"Apache Flink 1.12.5 Released",section:"Flink Blog",content:`The Apache Flink community released the next bugfix version of the Apache Flink 1.12 series.
 This release includes 76 fixes and minor improvements for Flink 1.12.4. The list below includes a detailed list of all fixes and improvements.
 We highly recommend all users to upgrade to Flink 1.12.5.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.12.5&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.12.5&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.12.5&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 Release Notes - Flink - Version 1.12.5
-Bug [FLINK-19925] - Errors$NativeIoException: readAddress(..) failed: Connection reset by peer [FLINK-20321] - Get NPE when using AvroDeserializationSchema to deserialize null input [FLINK-20888] - ContinuousFileReaderOperator should not close the output on close() [FLINK-21329] - &quot;Local recovery and sticky scheduling end-to-end test&quot; does not finish within 600 seconds [FLINK-21445] - Application mode does not set the configuration when building PackagedProgram [FLINK-21469] - stop-with-savepoint --drain doesn&#39;t advance watermark for sources chained to MultipleInputStreamTask [FLINK-21952] - Make all the &quot;Connection reset by peer&quot; exception wrapped as RemoteTransportException [FLINK-22015] - SQL filter containing OR and IS NULL will produce an incorrect result. [FLINK-22105] - SubtaskCheckpointCoordinatorTest.testForceAlignedCheckpointResultingInPriorityEvents unstable [FLINK-22157] - Join &amp; Select a part of composite primary key will cause ArrayIndexOutOfBoundsException [FLINK-22312] - YARNSessionFIFOSecuredITCase&gt;YARNSessionFIFOITCase.checkForProhibitedLogContents due to the heartbeat exception with Yarn RM [FLINK-22408] - Flink Table Parsr Hive Drop Partitions Syntax unparse is Error [FLINK-22419] - testScheduleRunAsync fail [FLINK-22434] - Dispatcher does not store suspended jobs in execution graph store [FLINK-22443] - can not be execute an extreme long sql under batch mode [FLINK-22494] - Avoid discarding checkpoints in case of failure [FLINK-22496] - ClusterEntrypointTest.testCloseAsyncShouldBeExecutedInShutdownHook failed [FLINK-22502] - DefaultCompletedCheckpointStore drops unrecoverable checkpoints silently [FLINK-22547] - OperatorCoordinatorHolderTest. 
verifyCheckpointEventOrderWhenCheckpointFutureCompletesLate fail [FLINK-22564] - Kubernetes-related ITCases do not fail even in case of failure [FLINK-22592] - numBuffersInLocal is always zero when using unaligned checkpoints [FLINK-22613] - FlinkKinesisITCase.testStopWithSavepoint fails [FLINK-22683] - The total Flink/process memory of memoryConfiguration in /taskmanagers can be null or incorrect value [FLINK-22698] - RabbitMQ source does not stop unless message arrives in queue [FLINK-22704] - ZooKeeperHaServicesTest.testCleanupJobData failed [FLINK-22721] - Breaking HighAvailabilityServices interface by adding new method [FLINK-22733] - Type mismatch thrown in DataStream.union if parameter is KeyedStream for Python DataStream API [FLINK-22756] - DispatcherTest.testJobStatusIsShownDuringTermination fail [FLINK-22788] - Code of equals method grows beyond 64 KB [FLINK-22814] - New sources are not defining/exposing checkpointStartDelayNanos metric [FLINK-22815] - Disable unaligned checkpoints for broadcast partitioning [FLINK-22819] - YARNFileReplicationITCase fails with &quot;The YARN application unexpectedly switched to state FAILED during deployment&quot; [FLINK-22820] - Stopping Yarn session cluster will cause fatal error [FLINK-22833] - Source tasks (both old and new) are not reporting checkpointStartDelay via CheckpointMetrics [FLINK-22856] - Move our Azure pipelines away from Ubuntu 16.04 by September [FLINK-22886] - Thread leak in RocksDBStateUploader [FLINK-22898] - HiveParallelismInference limit return wrong parallelism [FLINK-22908] - FileExecutionGraphInfoStoreTest.testPutSuspendedJobOnClusterShutdown should wait until job is running [FLINK-22927] - Exception on JobClient.get_job_status().result() [FLINK-22946] - Network buffer deadlock introduced by unaligned checkpoint [FLINK-22952] - docs_404_check fail on azure due to ruby version not available [FLINK-22963] - The description of taskmanager.memory.task.heap.size in the official document is incorrect 
[FLINK-22964] - Connector-base exposes dependency to flink-core. [FLINK-22987] - Scala suffix check isn&#39;t working [FLINK-23010] - HivePartitionFetcherContextBase::getComparablePartitionValueList can return partitions that don&#39;t exist [FLINK-23030] - PartitionRequestClientFactory#createPartitionRequestClient should throw when network failure [FLINK-23045] - RunnablesTest.testExecutorService_uncaughtExceptionHandler fails on azure [FLINK-23074] - There is a class conflict between flink-connector-hive and flink-parquet [FLINK-23076] - DispatcherTest.testWaitingForJobMasterLeadership fails on azure [FLINK-23119] - Fix the issue that the exception that General Python UDAF is unsupported is not thrown in Compile Stage. [FLINK-23120] - ByteArrayWrapperSerializer.serialize should use writeInt to serialize the length [FLINK-23133] - The dependencies are not handled properly when mixing use of Python Table API and Python DataStream API [FLINK-23135] - Flink SQL Error while applying rule AggregateReduceGroupingRule [FLINK-23164] - JobMasterTest.testMultipleStartsWork unstable on azure [FLINK-23166] - ZipUtils doesn&#39;t handle properly for softlinks inside the zip file [FLINK-23182] - Connection leak in RMQSource [FLINK-23184] - CompileException Assignment conversion not possible from type &quot;int&quot; to type &quot;short&quot; [FLINK-23201] - The check on alignmentDurationNanos seems to be too strict [FLINK-23223] - When flushAlways is enabled the subpartition may lose notification of data availability [FLINK-23233] - OperatorEventSendingCheckpointITCase.testOperatorEventLostWithReaderFailure fails on azure [FLINK-23248] - SinkWriter is not closed when failing [FLINK-23417] - MiniClusterITCase.testHandleBatchJobsWhenNotEnoughSlot fails on Azure [FLINK-23429] - State Processor API failed with FileNotFoundException when working with state files on Cloud Storage Improvement [FLINK-17857] - Kubernetes and docker e2e tests could not run on Mac OS after migration 
[FLINK-18182] - Upgrade AWS SDK in flink-connector-kinesis to include new region af-south-1 [FLINK-20695] - Zookeeper node under leader and leaderlatch is not deleted after job finished [FLINK-21229] - Support ssl connection with schema registry format [FLINK-21411] - The components on which Flink depends may contain vulnerabilities. If yes, fix them. [FLINK-22708] - Propagate savepoint settings from StreamExecutionEnvironment to StreamGraph [FLINK-22747] - Update commons-io to 2.8 [FLINK-22757] - Update GCS documentation [FLINK-22774] - Update Kinesis SQL connector&#39;s Guava to 27.0-jre [FLINK-22939] - Generalize JDK switch in azure setup [FLINK-23009] - Bump up Guava in Kinesis Connector [FLINK-23052] - cron_snapshot_deployment_maven unstable on maven [FLINK-23312] - Use -Dfast for building e2e tests on AZP `}),e.add({id:136,href:"/2021/08/06/apache-flink-1.13.2-released/",title:"Apache Flink 1.13.2 Released",section:"Flink Blog",content:`The Apache Flink community released the second bugfix version of the Apache Flink 1.13 series.
+Bug [FLINK-19925] - Errors$NativeIoException: readAddress(..) failed: Connection reset by peer [FLINK-20321] - Get NPE when using AvroDeserializationSchema to deserialize null input [FLINK-20888] - ContinuousFileReaderOperator should not close the output on close() [FLINK-21329] - &quot;Local recovery and sticky scheduling end-to-end test&quot; does not finish within 600 seconds [FLINK-21445] - Application mode does not set the configuration when building PackagedProgram [FLINK-21469] - stop-with-savepoint --drain doesn&#39;t advance watermark for sources chained to MultipleInputStreamTask [FLINK-21952] - Make all the &quot;Connection reset by peer&quot; exception wrapped as RemoteTransportException [FLINK-22015] - SQL filter containing OR and IS NULL will produce an incorrect result. [FLINK-22105] - SubtaskCheckpointCoordinatorTest.testForceAlignedCheckpointResultingInPriorityEvents unstable [FLINK-22157] - Join &amp; Select a part of composite primary key will cause ArrayIndexOutOfBoundsException [FLINK-22312] - YARNSessionFIFOSecuredITCase&gt;YARNSessionFIFOITCase.checkForProhibitedLogContents due to the heartbeat exception with Yarn RM [FLINK-22408] - Flink Table Parsr Hive Drop Partitions Syntax unparse is Error [FLINK-22419] - testScheduleRunAsync fail [FLINK-22434] - Dispatcher does not store suspended jobs in execution graph store [FLINK-22443] - can not be execute an extreme long sql under batch mode [FLINK-22494] - Avoid discarding checkpoints in case of failure [FLINK-22496] - ClusterEntrypointTest.testCloseAsyncShouldBeExecutedInShutdownHook failed [FLINK-22502] - DefaultCompletedCheckpointStore drops unrecoverable checkpoints silently [FLINK-22547] - OperatorCoordinatorHolderTest. 
verifyCheckpointEventOrderWhenCheckpointFutureCompletesLate fail [FLINK-22564] - Kubernetes-related ITCases do not fail even in case of failure [FLINK-22592] - numBuffersInLocal is always zero when using unaligned checkpoints [FLINK-22613] - FlinkKinesisITCase.testStopWithSavepoint fails [FLINK-22683] - The total Flink/process memory of memoryConfiguration in /taskmanagers can be null or incorrect value [FLINK-22698] - RabbitMQ source does not stop unless message arrives in queue [FLINK-22704] - ZooKeeperHaServicesTest.testCleanupJobData failed [FLINK-22721] - Breaking HighAvailabilityServices interface by adding new method [FLINK-22733] - Type mismatch thrown in DataStream.union if parameter is KeyedStream for Python DataStream API [FLINK-22756] - DispatcherTest.testJobStatusIsShownDuringTermination fail [FLINK-22788] - Code of equals method grows beyond 64 KB [FLINK-22814] - New sources are not defining/exposing checkpointStartDelayNanos metric [FLINK-22815] - Disable unaligned checkpoints for broadcast partitioning [FLINK-22819] - YARNFileReplicationITCase fails with &quot;The YARN application unexpectedly switched to state FAILED during deployment&quot; [FLINK-22820] - Stopping Yarn session cluster will cause fatal error [FLINK-22833] - Source tasks (both old and new) are not reporting checkpointStartDelay via CheckpointMetrics [FLINK-22856] - Move our Azure pipelines away from Ubuntu 16.04 by September [FLINK-22886] - Thread leak in RocksDBStateUploader [FLINK-22898] - HiveParallelismInference limit return wrong parallelism [FLINK-22908] - FileExecutionGraphInfoStoreTest.testPutSuspendedJobOnClusterShutdown should wait until job is running [FLINK-22927] - Exception on JobClient.get_job_status().result() [FLINK-22946] - Network buffer deadlock introduced by unaligned checkpoint [FLINK-22952] - docs_404_check fail on azure due to ruby version not available [FLINK-22963] - The description of taskmanager.memory.task.heap.size in the official document is incorrect 
[FLINK-22964] - Connector-base exposes dependency to flink-core. [FLINK-22987] - Scala suffix check isn&#39;t working [FLINK-23010] - HivePartitionFetcherContextBase::getComparablePartitionValueList can return partitions that don&#39;t exist [FLINK-23030] - PartitionRequestClientFactory#createPartitionRequestClient should throw when network failure [FLINK-23045] - RunnablesTest.testExecutorService_uncaughtExceptionHandler fails on azure [FLINK-23074] - There is a class conflict between flink-connector-hive and flink-parquet [FLINK-23076] - DispatcherTest.testWaitingForJobMasterLeadership fails on azure [FLINK-23119] - Fix the issue that the exception that General Python UDAF is unsupported is not thrown in Compile Stage. [FLINK-23120] - ByteArrayWrapperSerializer.serialize should use writeInt to serialize the length [FLINK-23133] - The dependencies are not handled properly when mixing use of Python Table API and Python DataStream API [FLINK-23135] - Flink SQL Error while applying rule AggregateReduceGroupingRule [FLINK-23164] - JobMasterTest.testMultipleStartsWork unstable on azure [FLINK-23166] - ZipUtils doesn&#39;t handle properly for softlinks inside the zip file [FLINK-23182] - Connection leak in RMQSource [FLINK-23184] - CompileException Assignment conversion not possible from type &quot;int&quot; to type &quot;short&quot; [FLINK-23201] - The check on alignmentDurationNanos seems to be too strict [FLINK-23223] - When flushAlways is enabled the subpartition may lose notification of data availability [FLINK-23233] - OperatorEventSendingCheckpointITCase.testOperatorEventLostWithReaderFailure fails on azure [FLINK-23248] - SinkWriter is not closed when failing [FLINK-23417] - MiniClusterITCase.testHandleBatchJobsWhenNotEnoughSlot fails on Azure [FLINK-23429] - State Processor API failed with FileNotFoundException when working with state files on Cloud Storage Improvement [FLINK-17857] - Kubernetes and docker e2e tests could not run on Mac OS after migration 
[FLINK-18182] - Upgrade AWS SDK in flink-connector-kinesis to include new region af-south-1 [FLINK-20695] - Zookeeper node under leader and leaderlatch is not deleted after job finished [FLINK-21229] - Support ssl connection with schema registry format [FLINK-21411] - The components on which Flink depends may contain vulnerabilities. If yes, fix them. [FLINK-22708] - Propagate savepoint settings from StreamExecutionEnvironment to StreamGraph [FLINK-22747] - Update commons-io to 2.8 [FLINK-22757] - Update GCS documentation [FLINK-22774] - Update Kinesis SQL connector&#39;s Guava to 27.0-jre [FLINK-22939] - Generalize JDK switch in azure setup [FLINK-23009] - Bump up Guava in Kinesis Connector [FLINK-23052] - cron_snapshot_deployment_maven unstable on maven [FLINK-23312] - Use -Dfast for building e2e tests on AZP `}),e.add({id:137,href:"/2021/08/06/apache-flink-1.13.2-released/",title:"Apache Flink 1.13.2 Released",section:"Flink Blog",content:`The Apache Flink community released the second bugfix version of the Apache Flink 1.13 series.
 This release includes 127 fixes and minor improvements for Flink 1.13.2. The list below includes bugfixes and improvements. For a complete list of all changes see: JIRA.
 We highly recommend all users to upgrade to Flink 1.13.2.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.13.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.13.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.13.2&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
-Release Notes - Flink - Version 1.13.2 Sub-task [FLINK-22726] - Hive GROUPING__ID returns different value in older versions Bug [FLINK-20888] - ContinuousFileReaderOperator should not close the output on close() [FLINK-20975] - HiveTableSourceITCase.testPartitionFilter fails on AZP [FLINK-21389] - ParquetInputFormat should not need parquet schema as user input [FLINK-21445] - Application mode does not set the configuration when building PackagedProgram [FLINK-21952] - Make all the &quot;Connection reset by peer&quot; exception wrapped as RemoteTransportException [FLINK-22045] - Set log level for shaded zookeeper logger [FLINK-22195] - YARNHighAvailabilityITCase.testClusterClientRetrieval because of TestTimedOutException [FLINK-22203] - KafkaChangelogTableITCase.testKafkaCanalChangelogSource fail due to ConcurrentModificationException [FLINK-22272] - Some scenes can&#39;t drop table by hive catalog [FLINK-22312] - YARNSessionFIFOSecuredITCase&gt;YARNSessionFIFOITCase.checkForProhibitedLogContents due to the heartbeat exception with Yarn RM [FLINK-22376] - SequentialChannelStateReaderImpl may recycle buffer twice [FLINK-22443] - can not be execute an extreme long sql under batch mode [FLINK-22462] - JdbcExactlyOnceSinkE2eTest.testInsert failed because of too many clients. [FLINK-22464] - OperatorEventSendingCheckpointITCase.testOperatorEventLostWithReaderFailure hangs with \`AdaptiveScheduler\` [FLINK-22492] - KinesisTableApiITCase with wrong results [FLINK-22496] - ClusterEntrypointTest.testCloseAsyncShouldBeExecutedInShutdownHook failed [FLINK-22545] - JVM crashes when runing OperatorEventSendingCheckpointITCase.testOperatorEventAckLost [FLINK-22547] - OperatorCoordinatorHolderTest. 
verifyCheckpointEventOrderWhenCheckpointFutureCompletesLate fail [FLINK-22613] - FlinkKinesisITCase.testStopWithSavepoint fails [FLINK-22662] - YARNHighAvailabilityITCase.testKillYarnSessionClusterEntrypoint fail [FLINK-22683] - The total Flink/process memory of memoryConfiguration in /taskmanagers can be null or incorrect value [FLINK-22686] - Incompatible subtask mappings while resuming from unaligned checkpoints [FLINK-22689] - Table API Documentation Row-Based Operations Example Fails [FLINK-22698] - RabbitMQ source does not stop unless message arrives in queue [FLINK-22725] - SlotManagers should unregister metrics at the start of suspend() [FLINK-22730] - Lookup join condition with CURRENT_DATE fails to filter records [FLINK-22746] - Links to connectors in docs are broken [FLINK-22759] - Correct the applicability of RocksDB related options as per operator [FLINK-22760] - HiveParser::setCurrentTimestamp fails with hive-3.1.2 [FLINK-22777] - Restore lost sections in Try Flink DataStream API example [FLINK-22779] - KafkaChangelogTableITCase.testKafkaDebeziumChangelogSource fail due to ConcurrentModificationException [FLINK-22786] - sql-client can not create .flink-sql-history file [FLINK-22795] - Throw better exception when executing remote SQL file in SQL Client [FLINK-22796] - Update mem_setup_tm documentation [FLINK-22814] - New sources are not defining/exposing checkpointStartDelayNanos metric [FLINK-22815] - Disable unaligned checkpoints for broadcast partitioning [FLINK-22819] - YARNFileReplicationITCase fails with &quot;The YARN application unexpectedly switched to state FAILED during deployment&quot; [FLINK-22820] - Stopping Yarn session cluster will cause fatal error [FLINK-22833] - Source tasks (both old and new) are not reporting checkpointStartDelay via CheckpointMetrics [FLINK-22856] - Move our Azure pipelines away from Ubuntu 16.04 by September [FLINK-22863] - ArrayIndexOutOfBoundsException may happen when building rescale edges [FLINK-22884] - 
Select view columns fail when store metadata with hive [FLINK-22886] - Thread leak in RocksDBStateUploader [FLINK-22890] - Few tests fail in HiveTableSinkITCase [FLINK-22894] - Window Top-N should allow n=1 [FLINK-22898] - HiveParallelismInference limit return wrong parallelism [FLINK-22908] - FileExecutionGraphInfoStoreTest.testPutSuspendedJobOnClusterShutdown should wait until job is running [FLINK-22927] - Exception on JobClient.get_job_status().result() [FLINK-22945] - StackOverflowException can happen when a large scale job is CANCELING/FAILING [FLINK-22946] - Network buffer deadlock introduced by unaligned checkpoint [FLINK-22948] - Scala example for toDataStream does not compile [FLINK-22952] - docs_404_check fail on azure due to ruby version not available [FLINK-22961] - Incorrect calculation of alignment timeout for LocalInputChannel [FLINK-22963] - The description of taskmanager.memory.task.heap.size in the official document is incorrect [FLINK-22964] - Connector-base exposes dependency to flink-core. 
[FLINK-22966] - NPE in StateAssignmentOperation when rescaling [FLINK-22980] - FileExecutionGraphInfoStoreTest hangs on azure [FLINK-22982] - java.lang.ClassCastException when using Python UDF [FLINK-22987] - Scala suffix check isn&#39;t working [FLINK-22993] - CompactFileWriter won&#39;t emit EndCheckpoint with Long.MAX_VALUE checkpointId [FLINK-23001] - flink-avro-glue-schema-registry lacks scala suffix [FLINK-23003] - Resource leak in RocksIncrementalSnapshotStrategy [FLINK-23010] - HivePartitionFetcherContextBase::getComparablePartitionValueList can return partitions that don&#39;t exist [FLINK-23018] - State factories should handle extended state descriptors [FLINK-23024] - RPC result TaskManagerInfoWithSlots not serializable [FLINK-23025] - sink-buffer-max-rows and sink-buffer-flush-interval options produce a lot of duplicates [FLINK-23030] - PartitionRequestClientFactory#createPartitionRequestClient should throw when network failure [FLINK-23034] - NPE in JobDetailsDeserializer during the reading old version of ExecutionState [FLINK-23045] - RunnablesTest.testExecutorService_uncaughtExceptionHandler fails on azure [FLINK-23073] - Fix space handling in Row CSV timestamp parser [FLINK-23074] - There is a class conflict between flink-connector-hive and flink-parquet [FLINK-23092] - Built-in UDAFs could not be mixed use with Python UDAF in group window [FLINK-23096] - HiveParser could not attach the sessionstate of hive [FLINK-23119] - Fix the issue that the exception that General Python UDAF is unsupported is not thrown in Compile Stage. 
[FLINK-23120] - ByteArrayWrapperSerializer.serialize should use writeInt to serialize the length [FLINK-23121] - Fix the issue that the InternalRow as arguments in Python UDAF [FLINK-23129] - When cancelling any running job of multiple jobs in an application cluster, JobManager shuts down [FLINK-23133] - The dependencies are not handled properly when mixing use of Python Table API and Python DataStream API [FLINK-23151] - KinesisTableApiITCase.testTableApiSourceAndSink fails on azure [FLINK-23166] - ZipUtils doesn&#39;t handle properly for softlinks inside the zip file [FLINK-23182] - Connection leak in RMQSource [FLINK-23184] - CompileException Assignment conversion not possible from type &quot;int&quot; to type &quot;short&quot; [FLINK-23188] - Unsupported function definition: IFNULL. Only user defined functions are supported as inline functions [FLINK-23196] - JobMasterITCase fail on azure due to BindException [FLINK-23201] - The check on alignmentDurationNanos seems to be too strict [FLINK-23223] - When flushAlways is enabled the subpartition may lose notification of data availability [FLINK-23233] - OperatorEventSendingCheckpointITCase.testOperatorEventLostWithReaderFailure fails on azure [FLINK-23235] - SinkITCase.writerAndCommitterAndGlobalCommitterExecuteInStreamingMode fails on azure [FLINK-23248] - SinkWriter is not closed when failing [FLINK-23259] - [DOCS]The &#39;window&#39; link on page docs/dev/datastream/operators/overview is failed and 404 is returned [FLINK-23260] - [DOCS]The link on page docs/libs/gelly/overview is failed and 404 is returned [FLINK-23270] - Impove description of Regular Joins section [FLINK-23280] - Python ExplainDetails does not have JSON_EXECUTION_PLAN option [FLINK-23306] - FlinkRelMdUniqueKeys causes exception when used with new Schema [FLINK-23359] - Fix the number of available slots in testResourceCanBeAllocatedForDifferentJobAfterFree [FLINK-23368] - Fix the wrong mapping of state cache in PyFlink [FLINK-23429] - State 
Processor API failed with FileNotFoundException when working with state files on Cloud Storage New Feature [FLINK-22770] - Expose SET/RESET from the parser Improvement [FLINK-18182] - Upgrade AWS SDK in flink-connector-kinesis to include new region af-south-1 [FLINK-20140] - Add documentation of TableResult.collect for Python Table API [FLINK-21229] - Support ssl connection with schema registry format [FLINK-21393] - Implement ParquetAvroInputFormat [FLINK-21411] - The components on which Flink depends may contain vulnerabilities. If yes, fix them. [FLINK-22528] - Document latency tracking metrics for state accesses [FLINK-22638] - Keep channels blocked on alignment timeout [FLINK-22655] - When using -i &lt;init.sql&gt; option to initialize SQL Client session It should be possible to annotate the script with -- [FLINK-22722] - Add Documentation for Kafka New Source [FLINK-22747] - Update commons-io to 2.8 [FLINK-22766] - Report metrics of KafkaConsumer in Kafka new source [FLINK-22774] - Update Kinesis SQL connector&#39;s Guava to 27.0-jre [FLINK-22855] - Translate the &#39;Overview of Python API&#39; page into Chinese. 
[FLINK-22873] - Add ToC to configuration documentation [FLINK-22905] - Fix missing comma in SQL example in &quot;Versioned Table&quot; page [FLINK-22939] - Generalize JDK switch in azure setup [FLINK-22996] - The description about coalesce is wrong [FLINK-23009] - Bump up Guava in Kinesis Connector [FLINK-23052] - cron_snapshot_deployment_maven unstable on maven [FLINK-23138] - Raise an exception if types other than PickledBytesTypeInfo are specified for state descriptor [FLINK-23156] - Change the reference of &#39;docs/dev/table/sql/queries&#39; [FLINK-23157] - Fix missing comma in SQL example in &quot;Versioned View&quot; page [FLINK-23162] - Create table uses time_ltz in the column name and it&#39;s expression which results in exception [FLINK-23168] - Catalog shouldn&#39;t merge properties for alter DB operation [FLINK-23178] - Raise an error for writing stream data into partitioned hive tables without a partition committer [FLINK-23200] - Correct grammatical mistakes in &#39;Table API&#39; page of &#39;Table API &amp; SQL&#39; [FLINK-23226] - Flink Chinese doc learn-flink/etl transformation.svg display issue [FLINK-23312] - Use -Dfast for building e2e tests on AZP `}),e.add({id:137,href:"/2021/07/07/how-to-identify-the-source-of-backpressure/",title:"How to identify the source of backpressure?",section:"Flink Blog",content:` Backpressure monitoring in the web UI
+Release Notes - Flink - Version 1.13.2 Sub-task [FLINK-22726] - Hive GROUPING__ID returns different value in older versions Bug [FLINK-20888] - ContinuousFileReaderOperator should not close the output on close() [FLINK-20975] - HiveTableSourceITCase.testPartitionFilter fails on AZP [FLINK-21389] - ParquetInputFormat should not need parquet schema as user input [FLINK-21445] - Application mode does not set the configuration when building PackagedProgram [FLINK-21952] - Make all the &quot;Connection reset by peer&quot; exception wrapped as RemoteTransportException [FLINK-22045] - Set log level for shaded zookeeper logger [FLINK-22195] - YARNHighAvailabilityITCase.testClusterClientRetrieval because of TestTimedOutException [FLINK-22203] - KafkaChangelogTableITCase.testKafkaCanalChangelogSource fail due to ConcurrentModificationException [FLINK-22272] - Some scenes can&#39;t drop table by hive catalog [FLINK-22312] - YARNSessionFIFOSecuredITCase&gt;YARNSessionFIFOITCase.checkForProhibitedLogContents due to the heartbeat exception with Yarn RM [FLINK-22376] - SequentialChannelStateReaderImpl may recycle buffer twice [FLINK-22443] - can not be execute an extreme long sql under batch mode [FLINK-22462] - JdbcExactlyOnceSinkE2eTest.testInsert failed because of too many clients. [FLINK-22464] - OperatorEventSendingCheckpointITCase.testOperatorEventLostWithReaderFailure hangs with \`AdaptiveScheduler\` [FLINK-22492] - KinesisTableApiITCase with wrong results [FLINK-22496] - ClusterEntrypointTest.testCloseAsyncShouldBeExecutedInShutdownHook failed [FLINK-22545] - JVM crashes when runing OperatorEventSendingCheckpointITCase.testOperatorEventAckLost [FLINK-22547] - OperatorCoordinatorHolderTest. 
verifyCheckpointEventOrderWhenCheckpointFutureCompletesLate fail [FLINK-22613] - FlinkKinesisITCase.testStopWithSavepoint fails [FLINK-22662] - YARNHighAvailabilityITCase.testKillYarnSessionClusterEntrypoint fail [FLINK-22683] - The total Flink/process memory of memoryConfiguration in /taskmanagers can be null or incorrect value [FLINK-22686] - Incompatible subtask mappings while resuming from unaligned checkpoints [FLINK-22689] - Table API Documentation Row-Based Operations Example Fails [FLINK-22698] - RabbitMQ source does not stop unless message arrives in queue [FLINK-22725] - SlotManagers should unregister metrics at the start of suspend() [FLINK-22730] - Lookup join condition with CURRENT_DATE fails to filter records [FLINK-22746] - Links to connectors in docs are broken [FLINK-22759] - Correct the applicability of RocksDB related options as per operator [FLINK-22760] - HiveParser::setCurrentTimestamp fails with hive-3.1.2 [FLINK-22777] - Restore lost sections in Try Flink DataStream API example [FLINK-22779] - KafkaChangelogTableITCase.testKafkaDebeziumChangelogSource fail due to ConcurrentModificationException [FLINK-22786] - sql-client can not create .flink-sql-history file [FLINK-22795] - Throw better exception when executing remote SQL file in SQL Client [FLINK-22796] - Update mem_setup_tm documentation [FLINK-22814] - New sources are not defining/exposing checkpointStartDelayNanos metric [FLINK-22815] - Disable unaligned checkpoints for broadcast partitioning [FLINK-22819] - YARNFileReplicationITCase fails with &quot;The YARN application unexpectedly switched to state FAILED during deployment&quot; [FLINK-22820] - Stopping Yarn session cluster will cause fatal error [FLINK-22833] - Source tasks (both old and new) are not reporting checkpointStartDelay via CheckpointMetrics [FLINK-22856] - Move our Azure pipelines away from Ubuntu 16.04 by September [FLINK-22863] - ArrayIndexOutOfBoundsException may happen when building rescale edges [FLINK-22884] - 
Select view columns fail when store metadata with hive [FLINK-22886] - Thread leak in RocksDBStateUploader [FLINK-22890] - Few tests fail in HiveTableSinkITCase [FLINK-22894] - Window Top-N should allow n=1 [FLINK-22898] - HiveParallelismInference limit return wrong parallelism [FLINK-22908] - FileExecutionGraphInfoStoreTest.testPutSuspendedJobOnClusterShutdown should wait until job is running [FLINK-22927] - Exception on JobClient.get_job_status().result() [FLINK-22945] - StackOverflowException can happen when a large scale job is CANCELING/FAILING [FLINK-22946] - Network buffer deadlock introduced by unaligned checkpoint [FLINK-22948] - Scala example for toDataStream does not compile [FLINK-22952] - docs_404_check fail on azure due to ruby version not available [FLINK-22961] - Incorrect calculation of alignment timeout for LocalInputChannel [FLINK-22963] - The description of taskmanager.memory.task.heap.size in the official document is incorrect [FLINK-22964] - Connector-base exposes dependency to flink-core. 
[FLINK-22966] - NPE in StateAssignmentOperation when rescaling [FLINK-22980] - FileExecutionGraphInfoStoreTest hangs on azure [FLINK-22982] - java.lang.ClassCastException when using Python UDF [FLINK-22987] - Scala suffix check isn&#39;t working [FLINK-22993] - CompactFileWriter won&#39;t emit EndCheckpoint with Long.MAX_VALUE checkpointId [FLINK-23001] - flink-avro-glue-schema-registry lacks scala suffix [FLINK-23003] - Resource leak in RocksIncrementalSnapshotStrategy [FLINK-23010] - HivePartitionFetcherContextBase::getComparablePartitionValueList can return partitions that don&#39;t exist [FLINK-23018] - State factories should handle extended state descriptors [FLINK-23024] - RPC result TaskManagerInfoWithSlots not serializable [FLINK-23025] - sink-buffer-max-rows and sink-buffer-flush-interval options produce a lot of duplicates [FLINK-23030] - PartitionRequestClientFactory#createPartitionRequestClient should throw when network failure [FLINK-23034] - NPE in JobDetailsDeserializer during the reading old version of ExecutionState [FLINK-23045] - RunnablesTest.testExecutorService_uncaughtExceptionHandler fails on azure [FLINK-23073] - Fix space handling in Row CSV timestamp parser [FLINK-23074] - There is a class conflict between flink-connector-hive and flink-parquet [FLINK-23092] - Built-in UDAFs could not be mixed use with Python UDAF in group window [FLINK-23096] - HiveParser could not attach the sessionstate of hive [FLINK-23119] - Fix the issue that the exception that General Python UDAF is unsupported is not thrown in Compile Stage. 
[FLINK-23120] - ByteArrayWrapperSerializer.serialize should use writeInt to serialize the length [FLINK-23121] - Fix the issue that the InternalRow as arguments in Python UDAF [FLINK-23129] - When cancelling any running job of multiple jobs in an application cluster, JobManager shuts down [FLINK-23133] - The dependencies are not handled properly when mixing use of Python Table API and Python DataStream API [FLINK-23151] - KinesisTableApiITCase.testTableApiSourceAndSink fails on azure [FLINK-23166] - ZipUtils doesn&#39;t handle properly for softlinks inside the zip file [FLINK-23182] - Connection leak in RMQSource [FLINK-23184] - CompileException Assignment conversion not possible from type &quot;int&quot; to type &quot;short&quot; [FLINK-23188] - Unsupported function definition: IFNULL. Only user defined functions are supported as inline functions [FLINK-23196] - JobMasterITCase fail on azure due to BindException [FLINK-23201] - The check on alignmentDurationNanos seems to be too strict [FLINK-23223] - When flushAlways is enabled the subpartition may lose notification of data availability [FLINK-23233] - OperatorEventSendingCheckpointITCase.testOperatorEventLostWithReaderFailure fails on azure [FLINK-23235] - SinkITCase.writerAndCommitterAndGlobalCommitterExecuteInStreamingMode fails on azure [FLINK-23248] - SinkWriter is not closed when failing [FLINK-23259] - [DOCS]The &#39;window&#39; link on page docs/dev/datastream/operators/overview is failed and 404 is returned [FLINK-23260] - [DOCS]The link on page docs/libs/gelly/overview is failed and 404 is returned [FLINK-23270] - Impove description of Regular Joins section [FLINK-23280] - Python ExplainDetails does not have JSON_EXECUTION_PLAN option [FLINK-23306] - FlinkRelMdUniqueKeys causes exception when used with new Schema [FLINK-23359] - Fix the number of available slots in testResourceCanBeAllocatedForDifferentJobAfterFree [FLINK-23368] - Fix the wrong mapping of state cache in PyFlink [FLINK-23429] - State 
Processor API failed with FileNotFoundException when working with state files on Cloud Storage New Feature [FLINK-22770] - Expose SET/RESET from the parser Improvement [FLINK-18182] - Upgrade AWS SDK in flink-connector-kinesis to include new region af-south-1 [FLINK-20140] - Add documentation of TableResult.collect for Python Table API [FLINK-21229] - Support ssl connection with schema registry format [FLINK-21393] - Implement ParquetAvroInputFormat [FLINK-21411] - The components on which Flink depends may contain vulnerabilities. If yes, fix them. [FLINK-22528] - Document latency tracking metrics for state accesses [FLINK-22638] - Keep channels blocked on alignment timeout [FLINK-22655] - When using -i &lt;init.sql&gt; option to initialize SQL Client session It should be possible to annotate the script with -- [FLINK-22722] - Add Documentation for Kafka New Source [FLINK-22747] - Update commons-io to 2.8 [FLINK-22766] - Report metrics of KafkaConsumer in Kafka new source [FLINK-22774] - Update Kinesis SQL connector&#39;s Guava to 27.0-jre [FLINK-22855] - Translate the &#39;Overview of Python API&#39; page into Chinese. 
[FLINK-22873] - Add ToC to configuration documentation [FLINK-22905] - Fix missing comma in SQL example in &quot;Versioned Table&quot; page [FLINK-22939] - Generalize JDK switch in azure setup [FLINK-22996] - The description about coalesce is wrong [FLINK-23009] - Bump up Guava in Kinesis Connector [FLINK-23052] - cron_snapshot_deployment_maven unstable on maven [FLINK-23138] - Raise an exception if types other than PickledBytesTypeInfo are specified for state descriptor [FLINK-23156] - Change the reference of &#39;docs/dev/table/sql/queries&#39; [FLINK-23157] - Fix missing comma in SQL example in &quot;Versioned View&quot; page [FLINK-23162] - Create table uses time_ltz in the column name and it&#39;s expression which results in exception [FLINK-23168] - Catalog shouldn&#39;t merge properties for alter DB operation [FLINK-23178] - Raise an error for writing stream data into partitioned hive tables without a partition committer [FLINK-23200] - Correct grammatical mistakes in &#39;Table API&#39; page of &#39;Table API &amp; SQL&#39; [FLINK-23226] - Flink Chinese doc learn-flink/etl transformation.svg display issue [FLINK-23312] - Use -Dfast for building e2e tests on AZP `}),e.add({id:138,href:"/2021/07/07/how-to-identify-the-source-of-backpressure/",title:"How to identify the source of backpressure?",section:"Flink Blog",content:` Backpressure monitoring in the web UI
 The backpressure topic was tackled from different angles over the last couple of years. However, when it comes to identifying and analyzing sources of backpressure, things have changed quite a bit in the recent Flink releases (especially with new additions to metrics and the web UI in Flink 1.13). This post will try to clarify some of these changes and go into more detail about how to track down the source of backpressure, but first&hellip;
 What is backpressure? # This has been explained very well in an old, but still accurate, post by Ufuk Celebi. I highly recommend reading it if you are not familiar with this concept. For a much deeper and low-level understanding of the topic and how Flink’s network stack works, there is a more advanced explanation available here.
 At a high level, backpressure happens if some operator(s) in the Job Graph cannot process records at the same rate as they are received. This fills up the input buffers of the subtask that is running this slow operator. Once the input buffers are full, backpressure propagates to the output buffers of the upstream subtasks. Once those are filled up, the upstream subtasks are also forced to slow down their records’ processing rate to match the processing rate of the operator causing this bottleneck down the stream. Backpressure further propagates up the stream until it reaches the source operators.
@@ -2447,18 +2457,18 @@
 Identifying the presence of backpressure. Locating which subtask(s) or machines are causing it. Digging deeper into what part of the code is causing it and which resource is scarce. Backpressure monitoring improvements and metrics can help you with the first two points. To tackle the last one, profiling the code can be the way to go. To help with profiling, also starting from Flink 1.13, Flame Graphs are integrated into Flink&rsquo;s web UI. Flame Graphs is a well known profiling tool and visualization technique and I encourage you to give it a try.
 But keep in mind that after locating where the bottleneck is, you can analyze it the same way you would any other non-distributed application (by checking resource utilization, attaching a profiler, etc). Usually there is no silver bullet for problems like this. You can try to scale up but sometimes it might not be easy or practical to do.
 Anyway&hellip; The aforementioned improvements to backpressure monitoring allow us to easily detect the source of backpressure, and Flame Graphs can help us to analyze why a particular subtask is causing problems. Together those two features should make the previously quite tedious process of debugging and performance analysis of Flink jobs that much easier! Please upgrade to Flink 1.13.x and try them out!
-[1] There is a third possibility. In a rare case when network exchange is actually the bottleneck in your job, the downstream task will have empty input buffers, while upstream output buffers will be full. `}),e.add({id:138,href:"/2021/05/28/apache-flink-1.13.1-released/",title:"Apache Flink 1.13.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix version of the Apache Flink 1.13 series.
+[1] There is a third possibility. In a rare case when network exchange is actually the bottleneck in your job, the downstream task will have empty input buffers, while upstream output buffers will be full. `}),e.add({id:139,href:"/2021/05/28/apache-flink-1.13.1-released/",title:"Apache Flink 1.13.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix version of the Apache Flink 1.13 series.
 This release includes 82 fixes and minor improvements for Flink 1.13.1. The list below includes bugfixes and improvements. For a complete list of all changes see: JIRA.
 We highly recommend all users to upgrade to Flink 1.13.1.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.13.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.13.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.13.1&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
-Release Notes - Flink - Version 1.13.1 Sub-task [FLINK-22378] - Type mismatch when declaring SOURCE_WATERMARK on TIMESTAMP_LTZ column [FLINK-22666] - Make structured type&#39;s fields more lenient during casting Bug [FLINK-12351] - AsyncWaitOperator should deep copy StreamElement when object reuse is enabled [FLINK-17170] - Cannot stop streaming job with savepoint which uses kinesis consumer [FLINK-19449] - LEAD/LAG cannot work correctly in streaming mode [FLINK-21181] - Buffer pool is destroyed error when outputting data over a timer after cancellation. [FLINK-21247] - flink iceberg table map&lt;string,string&gt; cannot convert to datastream [FLINK-21469] - stop-with-savepoint --drain doesn&#39;t advance watermark for sources chained to MultipleInputStreamTask [FLINK-21923] - SplitAggregateRule will be abnormal, when the sum/count and avg in SQL at the same time [FLINK-22109] - Misleading exception message if the number of arguments of a nested function is incorrect [FLINK-22294] - Hive reading fail when getting file numbers on different filesystem nameservices [FLINK-22355] - Simple Task Manager Memory Model image does not show up [FLINK-22356] - Filesystem/Hive partition file is not committed when watermark is applied on rowtime of TIMESTAMP_LTZ type [FLINK-22408] - Flink Table Parsr Hive Drop Partitions Syntax unparse is Error [FLINK-22424] - Writing to already released buffers potentially causing data corruption during job failover/cancellation [FLINK-22431] - AdaptiveScheduler does not log failure cause when recovering [FLINK-22434] - Dispatcher does not store suspended jobs in execution graph store [FLINK-22438] - add numRecordsOut metric for Async IO [FLINK-22442] - Using scala api to change the TimeCharacteristic of the PatternStream is invalid [FLINK-22463] - IllegalArgumentException is thrown in WindowAttachedWindowingStrategy when two phase is enabled for distinct agg [FLINK-22479] - [Kinesis][Consumer] Potential lock-up under error condition 
[FLINK-22489] - subtask backpressure indicator shows value for entire job [FLINK-22494] - Avoid discarding checkpoints in case of failure [FLINK-22502] - DefaultCompletedCheckpointStore drops unrecoverable checkpoints silently [FLINK-22511] - Fix the bug of non-composite result type in Python TableAggregateFunction [FLINK-22512] - Can&#39;t call current_timestamp with hive dialect for hive-3.1 [FLINK-22522] - BytesHashMap has many verbose logs [FLINK-22523] - TUMBLE TVF should throw helpful exception when specifying second interval parameter [FLINK-22525] - The zone id in exception message should be GMT+08:00 instead of GMT+8:00 [FLINK-22535] - Resource leak would happen if exception thrown during AbstractInvokable#restore of task life [FLINK-22555] - LGPL-2.1 files in flink-python jars [FLINK-22573] - AsyncIO can timeout elements after completion [FLINK-22574] - Adaptive Scheduler: Can not cancel restarting job [FLINK-22592] - numBuffersInLocal is always zero when using unaligned checkpoints [FLINK-22596] - Active timeout is not triggered if there were no barriers [FLINK-22618] - Fix incorrect free resource metrics of task managers [FLINK-22654] - SqlCreateTable toString()/unparse() lose CONSTRAINTS and watermarks [FLINK-22661] - HiveInputFormatPartitionReader can return invalid data [FLINK-22688] - Root Exception can not be shown on Web UI in Flink 1.13.0 [FLINK-22706] - Source NOTICE outdated regarding docs/ [FLINK-22721] - Breaking HighAvailabilityServices interface by adding new method [FLINK-22733] - Type mismatch thrown in DataStream.union if parameter is KeyedStream for Python DataStream API Improvement [FLINK-18952] - Add 10 minutes to DataStream API documentation [FLINK-20695] - Zookeeper node under leader and leaderlatch is not deleted after job finished [FLINK-22250] - flink-sql-parser model Class ParserResource lack ParserResource.properties [FLINK-22301] - Statebackend and CheckpointStorage type is not shown in the Web UI [FLINK-22304] - Refactor some 
interfaces for TVF based window to improve the extendability [FLINK-22470] - The root cause of the exception encountered during compiling the job was not exposed to users in certain cases [FLINK-22560] - Filter maven metadata from all jars [FLINK-22699] - Make ConstantArgumentCount public API [FLINK-22708] - Propagate savepoint settings from StreamExecutionEnvironment to StreamGraph [FLINK-22725] - SlotManagers should unregister metrics at the start of suspend() `}),e.add({id:139,href:"/2021/05/21/apache-flink-1.12.4-released/",title:"Apache Flink 1.12.4 Released",section:"Flink Blog",content:`The Apache Flink community released the next bugfix version of the Apache Flink 1.12 series.
+Release Notes - Flink - Version 1.13.1 Sub-task [FLINK-22378] - Type mismatch when declaring SOURCE_WATERMARK on TIMESTAMP_LTZ column [FLINK-22666] - Make structured type&#39;s fields more lenient during casting Bug [FLINK-12351] - AsyncWaitOperator should deep copy StreamElement when object reuse is enabled [FLINK-17170] - Cannot stop streaming job with savepoint which uses kinesis consumer [FLINK-19449] - LEAD/LAG cannot work correctly in streaming mode [FLINK-21181] - Buffer pool is destroyed error when outputting data over a timer after cancellation. [FLINK-21247] - flink iceberg table map&lt;string,string&gt; cannot convert to datastream [FLINK-21469] - stop-with-savepoint --drain doesn&#39;t advance watermark for sources chained to MultipleInputStreamTask [FLINK-21923] - SplitAggregateRule will be abnormal, when the sum/count and avg in SQL at the same time [FLINK-22109] - Misleading exception message if the number of arguments of a nested function is incorrect [FLINK-22294] - Hive reading fail when getting file numbers on different filesystem nameservices [FLINK-22355] - Simple Task Manager Memory Model image does not show up [FLINK-22356] - Filesystem/Hive partition file is not committed when watermark is applied on rowtime of TIMESTAMP_LTZ type [FLINK-22408] - Flink Table Parsr Hive Drop Partitions Syntax unparse is Error [FLINK-22424] - Writing to already released buffers potentially causing data corruption during job failover/cancellation [FLINK-22431] - AdaptiveScheduler does not log failure cause when recovering [FLINK-22434] - Dispatcher does not store suspended jobs in execution graph store [FLINK-22438] - add numRecordsOut metric for Async IO [FLINK-22442] - Using scala api to change the TimeCharacteristic of the PatternStream is invalid [FLINK-22463] - IllegalArgumentException is thrown in WindowAttachedWindowingStrategy when two phase is enabled for distinct agg [FLINK-22479] - [Kinesis][Consumer] Potential lock-up under error condition 
[FLINK-22489] - subtask backpressure indicator shows value for entire job [FLINK-22494] - Avoid discarding checkpoints in case of failure [FLINK-22502] - DefaultCompletedCheckpointStore drops unrecoverable checkpoints silently [FLINK-22511] - Fix the bug of non-composite result type in Python TableAggregateFunction [FLINK-22512] - Can&#39;t call current_timestamp with hive dialect for hive-3.1 [FLINK-22522] - BytesHashMap has many verbose logs [FLINK-22523] - TUMBLE TVF should throw helpful exception when specifying second interval parameter [FLINK-22525] - The zone id in exception message should be GMT+08:00 instead of GMT+8:00 [FLINK-22535] - Resource leak would happen if exception thrown during AbstractInvokable#restore of task life [FLINK-22555] - LGPL-2.1 files in flink-python jars [FLINK-22573] - AsyncIO can timeout elements after completion [FLINK-22574] - Adaptive Scheduler: Can not cancel restarting job [FLINK-22592] - numBuffersInLocal is always zero when using unaligned checkpoints [FLINK-22596] - Active timeout is not triggered if there were no barriers [FLINK-22618] - Fix incorrect free resource metrics of task managers [FLINK-22654] - SqlCreateTable toString()/unparse() lose CONSTRAINTS and watermarks [FLINK-22661] - HiveInputFormatPartitionReader can return invalid data [FLINK-22688] - Root Exception can not be shown on Web UI in Flink 1.13.0 [FLINK-22706] - Source NOTICE outdated regarding docs/ [FLINK-22721] - Breaking HighAvailabilityServices interface by adding new method [FLINK-22733] - Type mismatch thrown in DataStream.union if parameter is KeyedStream for Python DataStream API Improvement [FLINK-18952] - Add 10 minutes to DataStream API documentation [FLINK-20695] - Zookeeper node under leader and leaderlatch is not deleted after job finished [FLINK-22250] - flink-sql-parser model Class ParserResource lack ParserResource.properties [FLINK-22301] - Statebackend and CheckpointStorage type is not shown in the Web UI [FLINK-22304] - Refactor some 
interfaces for TVF based window to improve the extendability [FLINK-22470] - The root cause of the exception encountered during compiling the job was not exposed to users in certain cases [FLINK-22560] - Filter maven metadata from all jars [FLINK-22699] - Make ConstantArgumentCount public API [FLINK-22708] - Propagate savepoint settings from StreamExecutionEnvironment to StreamGraph [FLINK-22725] - SlotManagers should unregister metrics at the start of suspend() `}),e.add({id:140,href:"/2021/05/21/apache-flink-1.12.4-released/",title:"Apache Flink 1.12.4 Released",section:"Flink Blog",content:`The Apache Flink community released the next bugfix version of the Apache Flink 1.12 series.
 This release includes 21 fixes and minor improvements for Flink 1.12.3. The list below includes a detailed list of all fixes and improvements.
 We highly recommend all users to upgrade to Flink 1.12.4.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.12.4&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.12.4&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.12.4&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 Release Notes - Flink - Version 1.12.4
-Bug [FLINK-17170] - Cannot stop streaming job with savepoint which uses kinesis consumer [FLINK-20114] - Fix a few KafkaSource-related bugs [FLINK-21181] - Buffer pool is destroyed error when outputting data over a timer after cancellation. [FLINK-22109] - Misleading exception message if the number of arguments of a nested function is incorrect [FLINK-22368] - UnalignedCheckpointITCase hangs on azure [FLINK-22424] - Writing to already released buffers potentially causing data corruption during job failover/cancellation [FLINK-22438] - add numRecordsOut metric for Async IO [FLINK-22442] - Using scala api to change the TimeCharacteristic of the PatternStream is invalid [FLINK-22479] - [Kinesis][Consumer] Potential lock-up under error condition [FLINK-22489] - subtask backpressure indicator shows value for entire job [FLINK-22555] - LGPL-2.1 files in flink-python jars [FLINK-22557] - Japicmp fails on 1.12 branch [FLINK-22573] - AsyncIO can timeout elements after completion [FLINK-22577] - KubernetesLeaderElectionAndRetrievalITCase is failing [FLINK-22597] - JobMaster cannot be restarted Improvement [FLINK-18952] - Add 10 minutes to DataStream API documentation [FLINK-20553] - Add end-to-end test case for new Kafka source [FLINK-22470] - The root cause of the exception encountered during compiling the job was not exposed to users in certain cases [FLINK-22539] - Restructure the Python dependency management documentation [FLINK-22544] - Add the missing documentation about the command line options for PyFlink [FLINK-22560] - Filter maven metadata from all jars `}),e.add({id:140,href:"/2021/05/06/scaling-flink-automatically-with-reactive-mode/",title:"Scaling Flink automatically with Reactive Mode",section:"Flink Blog",content:` Introduction # Streaming jobs which run for several days or longer usually experience variations in workload during their lifetime. These variations can originate from seasonal spikes, such as day vs. night, weekdays vs. weekend or holidays vs. 
non-holidays, sudden events or simply the growing popularity of your product. Although some of these variations are more predictable than others, in all cases there is a change in job resource demand that needs to be addressed if you want to ensure the same quality of service for your customers.
+Bug [FLINK-17170] - Cannot stop streaming job with savepoint which uses kinesis consumer [FLINK-20114] - Fix a few KafkaSource-related bugs [FLINK-21181] - Buffer pool is destroyed error when outputting data over a timer after cancellation. [FLINK-22109] - Misleading exception message if the number of arguments of a nested function is incorrect [FLINK-22368] - UnalignedCheckpointITCase hangs on azure [FLINK-22424] - Writing to already released buffers potentially causing data corruption during job failover/cancellation [FLINK-22438] - add numRecordsOut metric for Async IO [FLINK-22442] - Using scala api to change the TimeCharacteristic of the PatternStream is invalid [FLINK-22479] - [Kinesis][Consumer] Potential lock-up under error condition [FLINK-22489] - subtask backpressure indicator shows value for entire job [FLINK-22555] - LGPL-2.1 files in flink-python jars [FLINK-22557] - Japicmp fails on 1.12 branch [FLINK-22573] - AsyncIO can timeout elements after completion [FLINK-22577] - KubernetesLeaderElectionAndRetrievalITCase is failing [FLINK-22597] - JobMaster cannot be restarted Improvement [FLINK-18952] - Add 10 minutes to DataStream API documentation [FLINK-20553] - Add end-to-end test case for new Kafka source [FLINK-22470] - The root cause of the exception encountered during compiling the job was not exposed to users in certain cases [FLINK-22539] - Restructure the Python dependency management documentation [FLINK-22544] - Add the missing documentation about the command line options for PyFlink [FLINK-22560] - Filter maven metadata from all jars `}),e.add({id:141,href:"/2021/05/06/scaling-flink-automatically-with-reactive-mode/",title:"Scaling Flink automatically with Reactive Mode",section:"Flink Blog",content:` Introduction # Streaming jobs which run for several days or longer usually experience variations in workload during their lifetime. These variations can originate from seasonal spikes, such as day vs. night, weekdays vs. weekend or holidays vs. 
non-holidays, sudden events or simply the growing popularity of your product. Although some of these variations are more predictable than others, in all cases there is a change in job resource demand that needs to be addressed if you want to ensure the same quality of service for your customers.
 A simple way of quantifying the mismatch between the required resources and the available resources is to measure the space between the actual load and the number of available workers. As pictured below, in the case of static resource allocation, you can see that there&rsquo;s a big gap between the actual load and the available workers — hence, we are wasting resources. For elastic resource allocation, the gap between the red and black line is consistently small.
 Manually rescaling a Flink job has been possible since Flink 1.2 introduced rescalable state, which allows you to stop-and-restore a job with a different parallelism. For example, if your job is running with a parallelism of p=100 and your load increases, you can restart it with p=200 to cope with the additional data.
 The problem with this approach is that you have to orchestrate a rescale operation with custom tools by yourself, including error handling and similar tasks.
@@ -2496,7 +2506,7 @@
 Conclusion # In this blog post, we&rsquo;ve introduced Reactive Mode, a big step forward in Flink&rsquo;s ability to dynamically adjust to changing workloads, reducing resource utilization and overall costs. The blog post demonstrated Reactive Mode on Kubernetes, including some lessons learned.
 Reactive Mode is new feature in Flink 1.13 and is currently in the MVP (Minimal Viable Product) phase of product development. Before experimenting with it, or using it in production, please check the documentation, in particular the current limitations section. In this phase, the biggest limitation is that only standalone application mode deployments are supported (i.e. no active resource managers or session clusters).
 The community is actively looking for feedback on this feature, to continue improving Flink&rsquo;s resource elasticity. If you have any feedback, please reach out to the dev@ mailing list or to me personally on Twitter.
-`}),e.add({id:141,href:"/2021/05/03/apache-flink-1.13.0-release-announcement/",title:"Apache Flink 1.13.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink 1.13.0! More than 200 contributors worked on over 1,000 issues for this new version.
+`}),e.add({id:142,href:"/2021/05/03/apache-flink-1.13.0-release-announcement/",title:"Apache Flink 1.13.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink 1.13.0! More than 200 contributors worked on over 1,000 issues for this new version.
 The release brings us a big step forward in one of our major efforts: Making Stream Processing Applications as natural and as simple to manage as any other application. The new reactive scaling mode means that scaling streaming applications in and out now works like in any other application by just changing the number of parallel processes.
 The release also prominently features a series of improvements that help users better understand the performance of applications. When the streams don&rsquo;t flow as fast as you&rsquo;d hope, these can help you to understand why: Load and backpressure visualization to identify bottlenecks, CPU flame graphs to identify hot code paths in your application, and State Access Latencies to see how the State Backends are keeping up.
 Beyond those features, the Flink community has added a ton of improvements all over the system, some of which we discuss in this article. We hope you enjoy the new release and features. Towards the end of the article, we describe changes to be aware of when upgrading from earlier versions of Apache Flink.
@@ -2601,13 +2611,13 @@
 You can also check the complete release changelog and updated documentation for a detailed list of changes and new features.
 List of Contributors # The Apache Flink community would like to thank each one of the contributors that have made this release possible:
 acqua.csq, AkisAya, Alexander Fedulov, Aljoscha Krettek, Ammar Al-Batool, Andrey Zagrebin, anlen321, Anton Kalashnikov, appleyuchi, Arvid Heise, Austin Cawley-Edwards, austin ce, azagrebin, blublinsky, Brian Zhou, bytesmithing, caozhen1937, chen qin, Chesnay Schepler, Congxian Qiu, Cristian, cxiiiiiii, Danny Chan, Danny Cranmer, David Anderson, Dawid Wysakowicz, dbgp2021, Dian Fu, DinoZhang, dixingxing, Dong Lin, Dylan Forciea, est08zw, Etienne Chauchot, fanrui03, Flora Tao, FLRNKS, fornaix, fuyli, George, Giacomo Gamba, GitHub, godfrey he, GuoWei Ma, Gyula Fora, hackergin, hameizi, Haoyuan Ge, Harshvardhan Chauhan, Haseeb Asif, hehuiyuan, huangxiao, HuangXiao, huangxingbo, HuangXingBo, humengyu2012, huzekang, Hwanju Kim, Ingo Bürk, I. Raleigh, Ivan, iyupeng, Jack, Jane, Jark Wu, Jerry Wang, Jiangjie (Becket) Qin, JiangXin, Jiayi Liao, JieFang.He, Jie Wang, jinfeng, Jingsong Lee, JingsongLi, Jing Zhang, Joao Boto, JohnTeslaa, Jun Qin, kanata163, kevin.cyj, KevinyhZou, Kezhu Wang, klion26, Kostas Kloudas, kougazhang, Kurt Young, laughing, legendtkl, leiqiang, Leonard Xu, liaojiayi, Lijie Wang, liming.1018, lincoln lee, lincoln-lil, liushouwei, liuyufei, LM Kang, lometheus, luyb, Lyn Zhang, Maciej Obuchowski, Maciek Próchniak, mans2singh, Marek Sabo, Matthias Pohl, meijie, Mika Naylor, Miklos Gergely, Mohit Paliwal, Moritz Manner, morsapaes, Mulan, Nico Kruber, openopen2, paul8263, Paul Lam, Peidian li, pengkangjing, Peter Huang, Piotr Nowojski, Qinghui Xu, Qingsheng Ren, Raghav Kumar Gautam, Rainie Li, Ricky Burnett, Rion Williams, Robert Metzger, Roc Marshal, Roman, Roman Khachatryan, Ruguo, Ruguo Yu, Rui Li, Sebastian Liu, Seth Wiesman, sharkdtu, sharkdtu(涂小刚), Shengkai, shizhengchao, shouweikun, Shuo Cheng, simenliuxing, SteNicholas, Stephan Ewen, Suo Lu, sv3ndk, Svend Vanderveken, taox, Terry Wang, Thelgis Kotsos, Thesharing, Thomas Weise, Till Rohrmann, Timo Walther, Ting Sun, totoro, totorooo, TsReaper, Tzu-Li (Gordon) Tai, V1ncentzzZ, vthinkxie, wangfeifan, 
wangpeibin, wangyang0918, wangyemao-github, Wei Zhong, Wenlong Lyu, wineandcheeze, wjc, xiaoHoly, Xintong Song, xixingya, xmarker, Xue Wang, Yadong Xie, yangsanity, Yangze Guo, Yao Zhang, Yuan Mei, yulei0824, Yu Li, Yun Gao, Yun Tang, yuruguo, yushujun, Yuval Itzchakov, yuzhao.cyz, zck, zhangjunfan, zhangzhengqi3, zhao_wei_nan, zhaown, zhaoxing, Zhenghua Gao, Zhenqiu Huang, zhisheng, zhongqishang, zhushang, zhuxiaoshang, Zhu Zhu, zjuwangg, zoucao, zoudan, 左元, 星, 肖佳文, 龙三
-`}),e.add({id:142,href:"/2021/04/29/apache-flink-1.12.3-released/",title:"Apache Flink 1.12.3 Released",section:"Flink Blog",content:`The Apache Flink community released the next bugfix version of the Apache Flink 1.12 series.
+`}),e.add({id:143,href:"/2021/04/29/apache-flink-1.12.3-released/",title:"Apache Flink 1.12.3 Released",section:"Flink Blog",content:`The Apache Flink community released the next bugfix version of the Apache Flink 1.12 series.
 This release includes 73 fixes and minor improvements for Flink 1.12.2. The list below includes a detailed list of all fixes and improvements.
 We highly recommend all users to upgrade to Flink 1.12.3.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.12.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.12.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.12.3&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Bug [FLINK-18071] - CoordinatorEventsExactlyOnceITCase.checkListContainsSequence fails on CI [FLINK-20547] - Batch job fails due to the exception in network stack [FLINK-20654] - Unaligned checkpoint recovery may lead to corrupted data stream [FLINK-20722] - HiveTableSink should copy the record when converting RowData to Row [FLINK-20752] - FailureRateRestartBackoffTimeStrategy allows one less restart than configured [FLINK-20761] - Cannot read hive table/partition whose location path contains comma [FLINK-20977] - USE DATABASE &amp; USE CATALOG fails with quoted identifiers containing characters to be escaped in Flink SQL client [FLINK-21008] - Residual HA related Kubernetes ConfigMaps and ZooKeeper nodes when cluster entrypoint received SIGTERM in shutdown [FLINK-21012] - AvroFileFormatFactory uses non-deserializable lambda function [FLINK-21133] - FLIP-27 Source does not work with synchronous savepoint [FLINK-21148] - YARNSessionFIFOSecuredITCase cannot connect to BlobServer [FLINK-21159] - KafkaSourceEnumerator not sending NoMoreSplitsEvent to unassigned reader [FLINK-21178] - Task failure will not trigger master hook&#39;s reset() [FLINK-21289] - Application mode ignores the pipeline.classpaths configuration [FLINK-21387] - DispatcherTest.testInvalidCallDuringInitialization times out on azp [FLINK-21388] - Parquet DECIMAL logical type is not properly supported in ParquetSchemaConverter [FLINK-21431] - UpsertKafkaTableITCase.testTemporalJoin hang [FLINK-21434] - When UDAF return ROW type, and the number of fields is more than 14, the crash happend [FLINK-21497] - JobLeaderIdService completes leader future despite no leader being elected [FLINK-21515] - SourceStreamTaskTest.testStopWithSavepointShouldNotInterruptTheSource is failing [FLINK-21518] - CheckpointCoordinatorTest.testMinCheckpointPause fails fatally on AZP [FLINK-21523] - ArrayIndexOutOfBoundsException occurs while run a hive streaming job with partitioned table source [FLINK-21535] - 
UnalignedCheckpointITCase.execute failed with &quot;OutOfMemoryError: Java heap space&quot; [FLINK-21550] - ZooKeeperHaServicesTest.testSimpleClose fail [FLINK-21552] - The managed memory was not released if exception was thrown in createPythonExecutionEnvironment [FLINK-21606] - TaskManager connected to invalid JobManager leading to TaskSubmissionException [FLINK-21609] - SimpleRecoveryITCaseBase.testRestartMultipleTimes fails on azure [FLINK-21654] - YARNSessionCapacitySchedulerITCase.testStartYarnSessionClusterInQaTeamQueue fail because of NullPointerException [FLINK-21661] - SHARD_GETRECORDS_INTERVAL_MILLIS wrong use? [FLINK-21685] - Flink JobManager failed to restart from checkpoint in kubernetes HA setup [FLINK-21691] - KafkaSource fails with NPE when setting it up [FLINK-21707] - Job is possible to hang when restarting a FINISHED task with POINTWISE BLOCKING consumers [FLINK-21710] - FlinkRelMdUniqueKeys gets incorrect result on TableScan after project push-down [FLINK-21725] - DataTypeExtractor extracts wrong fields ordering for Tuple12 [FLINK-21733] - WatermarkAssigner incorrectly recomputing the rowtime index which may cause ArrayIndexOutOfBoundsException [FLINK-21746] - flink sql fields in row access error about scalarfunction [FLINK-21753] - Cycle references between memory manager and gc cleaner action [FLINK-21817] - New Kafka Source might break subtask and split assignment upon rescale [FLINK-21833] - TemporalRowTimeJoinOperator.java will lead to the state expansion by short-life-cycle &amp; huge RowData, although config idle.state.retention.time [FLINK-21889] - source:canal-cdc , sink:upsert-kafka, print &quot;select * from sinkTable&quot;, throw NullException [FLINK-21922] - The method partition_by in Over doesn&#39;t work for expression dsl [FLINK-21933] - [kinesis][efo] EFO consumer treats interrupts as retryable exceptions [FLINK-21941] - testSavepointRescalingOutPartitionedOperatorStateList fail [FLINK-21942] - KubernetesLeaderRetrievalDriver 
not closed after terminated which lead to connection leak [FLINK-21944] - AbstractArrowPythonAggregateFunctionOperator.dispose should consider whether arrowSerializer is null [FLINK-21969] - PythonTimestampsAndWatermarksOperator emitted the Long.MAX_VALUE watermark before emitting all the data [FLINK-21980] - ZooKeeperRunningJobsRegistry creates an empty znode [FLINK-21986] - taskmanager native memory not release timely after restart [FLINK-21992] - Fix availability notification in UnionInputGate [FLINK-21996] - Transient RPC failure without TaskManager failure can lead to split assignment loss [FLINK-22006] - Could not run more than 20 jobs in a native K8s session when K8s HA enabled [FLINK-22024] - Maven: Entry has not been leased from this pool / fix for release 1.12 [FLINK-22053] - NumberSequenceSource causes fatal exception when less splits than parallelism. [FLINK-22055] - RPC main thread executor may schedule commands with wrong time unit of delay [FLINK-22061] - The DEFAULT_NON_SPLITTABLE_FILE_ENUMERATOR defined in FileSource should points to NonSplittingRecursiveEnumerator [FLINK-22081] - Entropy key not resolved if flink-s3-fs-hadoop is added as a plugin [FLINK-22082] - Nested projection push down doesn&#39;t work for data such as row(array(row)) [FLINK-22124] - The job finished without any exception if error was thrown during state access [FLINK-22172] - Fix the bug of shared resource among Python Operators of the same slot is not released [FLINK-22184] - Rest client shutdown on failure runs in netty thread [FLINK-22191] - PyFlinkStreamUserDefinedFunctionTests.test_udf_in_join_condition_2 fail due to NPE [FLINK-22327] - NPE exception happens if it throws exception in finishBundle during job shutdown [FLINK-22339] - Fix some encoding exceptions were not thrown in cython coders [FLINK-22345] - CoordinatorEventsExactlyOnceITCase hangs on azure [FLINK-22385] - Type mismatch in NetworkBufferPool Improvement [FLINK-20533] - Add histogram support to Datadog 
reporter [FLINK-21382] - Standalone K8s documentation does not explain usage of standby JobManagers [FLINK-21521] - Pretty print K8s specifications [FLINK-21690] - remove redundant tolerableCheckpointFailureNumber setting in CheckpointConfig [FLINK-21735] - Harden JobMaster#updateTaskExecutionState() [FLINK-22051] - Better document the distinction between stop-with-savepoint and stop-with-savepoint-with-drain [FLINK-22142] - Remove console logging for Kafka connector for AZP runs [FLINK-22208] - Bump snappy-java to 1.1.5+ [FLINK-22297] - Perform early check to ensure that the length of the result is the same as the input for Pandas UDF `}),e.add({id:143,href:"/2021/04/15/stateful-functions-3.0.0-remote-functions-front-and-center/",title:"Stateful Functions 3.0.0: Remote Functions Front and Center",section:"Flink Blog",content:`The Apache Flink community is happy to announce the release of Stateful Functions (StateFun) 3.0.0! Stateful Functions is a cross-platform stack for building Stateful Serverless applications, making it radically simpler to develop scalable, consistent, and elastic distributed applications.
+Bug [FLINK-18071] - CoordinatorEventsExactlyOnceITCase.checkListContainsSequence fails on CI [FLINK-20547] - Batch job fails due to the exception in network stack [FLINK-20654] - Unaligned checkpoint recovery may lead to corrupted data stream [FLINK-20722] - HiveTableSink should copy the record when converting RowData to Row [FLINK-20752] - FailureRateRestartBackoffTimeStrategy allows one less restart than configured [FLINK-20761] - Cannot read hive table/partition whose location path contains comma [FLINK-20977] - USE DATABASE &amp; USE CATALOG fails with quoted identifiers containing characters to be escaped in Flink SQL client [FLINK-21008] - Residual HA related Kubernetes ConfigMaps and ZooKeeper nodes when cluster entrypoint received SIGTERM in shutdown [FLINK-21012] - AvroFileFormatFactory uses non-deserializable lambda function [FLINK-21133] - FLIP-27 Source does not work with synchronous savepoint [FLINK-21148] - YARNSessionFIFOSecuredITCase cannot connect to BlobServer [FLINK-21159] - KafkaSourceEnumerator not sending NoMoreSplitsEvent to unassigned reader [FLINK-21178] - Task failure will not trigger master hook&#39;s reset() [FLINK-21289] - Application mode ignores the pipeline.classpaths configuration [FLINK-21387] - DispatcherTest.testInvalidCallDuringInitialization times out on azp [FLINK-21388] - Parquet DECIMAL logical type is not properly supported in ParquetSchemaConverter [FLINK-21431] - UpsertKafkaTableITCase.testTemporalJoin hang [FLINK-21434] - When UDAF return ROW type, and the number of fields is more than 14, the crash happend [FLINK-21497] - JobLeaderIdService completes leader future despite no leader being elected [FLINK-21515] - SourceStreamTaskTest.testStopWithSavepointShouldNotInterruptTheSource is failing [FLINK-21518] - CheckpointCoordinatorTest.testMinCheckpointPause fails fatally on AZP [FLINK-21523] - ArrayIndexOutOfBoundsException occurs while run a hive streaming job with partitioned table source [FLINK-21535] - 
UnalignedCheckpointITCase.execute failed with &quot;OutOfMemoryError: Java heap space&quot; [FLINK-21550] - ZooKeeperHaServicesTest.testSimpleClose fail [FLINK-21552] - The managed memory was not released if exception was thrown in createPythonExecutionEnvironment [FLINK-21606] - TaskManager connected to invalid JobManager leading to TaskSubmissionException [FLINK-21609] - SimpleRecoveryITCaseBase.testRestartMultipleTimes fails on azure [FLINK-21654] - YARNSessionCapacitySchedulerITCase.testStartYarnSessionClusterInQaTeamQueue fail because of NullPointerException [FLINK-21661] - SHARD_GETRECORDS_INTERVAL_MILLIS wrong use? [FLINK-21685] - Flink JobManager failed to restart from checkpoint in kubernetes HA setup [FLINK-21691] - KafkaSource fails with NPE when setting it up [FLINK-21707] - Job is possible to hang when restarting a FINISHED task with POINTWISE BLOCKING consumers [FLINK-21710] - FlinkRelMdUniqueKeys gets incorrect result on TableScan after project push-down [FLINK-21725] - DataTypeExtractor extracts wrong fields ordering for Tuple12 [FLINK-21733] - WatermarkAssigner incorrectly recomputing the rowtime index which may cause ArrayIndexOutOfBoundsException [FLINK-21746] - flink sql fields in row access error about scalarfunction [FLINK-21753] - Cycle references between memory manager and gc cleaner action [FLINK-21817] - New Kafka Source might break subtask and split assignment upon rescale [FLINK-21833] - TemporalRowTimeJoinOperator.java will lead to the state expansion by short-life-cycle &amp; huge RowData, although config idle.state.retention.time [FLINK-21889] - source:canal-cdc , sink:upsert-kafka, print &quot;select * from sinkTable&quot;, throw NullException [FLINK-21922] - The method partition_by in Over doesn&#39;t work for expression dsl [FLINK-21933] - [kinesis][efo] EFO consumer treats interrupts as retryable exceptions [FLINK-21941] - testSavepointRescalingOutPartitionedOperatorStateList fail [FLINK-21942] - KubernetesLeaderRetrievalDriver 
not closed after terminated which lead to connection leak [FLINK-21944] - AbstractArrowPythonAggregateFunctionOperator.dispose should consider whether arrowSerializer is null [FLINK-21969] - PythonTimestampsAndWatermarksOperator emitted the Long.MAX_VALUE watermark before emitting all the data [FLINK-21980] - ZooKeeperRunningJobsRegistry creates an empty znode [FLINK-21986] - taskmanager native memory not release timely after restart [FLINK-21992] - Fix availability notification in UnionInputGate [FLINK-21996] - Transient RPC failure without TaskManager failure can lead to split assignment loss [FLINK-22006] - Could not run more than 20 jobs in a native K8s session when K8s HA enabled [FLINK-22024] - Maven: Entry has not been leased from this pool / fix for release 1.12 [FLINK-22053] - NumberSequenceSource causes fatal exception when less splits than parallelism. [FLINK-22055] - RPC main thread executor may schedule commands with wrong time unit of delay [FLINK-22061] - The DEFAULT_NON_SPLITTABLE_FILE_ENUMERATOR defined in FileSource should points to NonSplittingRecursiveEnumerator [FLINK-22081] - Entropy key not resolved if flink-s3-fs-hadoop is added as a plugin [FLINK-22082] - Nested projection push down doesn&#39;t work for data such as row(array(row)) [FLINK-22124] - The job finished without any exception if error was thrown during state access [FLINK-22172] - Fix the bug of shared resource among Python Operators of the same slot is not released [FLINK-22184] - Rest client shutdown on failure runs in netty thread [FLINK-22191] - PyFlinkStreamUserDefinedFunctionTests.test_udf_in_join_condition_2 fail due to NPE [FLINK-22327] - NPE exception happens if it throws exception in finishBundle during job shutdown [FLINK-22339] - Fix some encoding exceptions were not thrown in cython coders [FLINK-22345] - CoordinatorEventsExactlyOnceITCase hangs on azure [FLINK-22385] - Type mismatch in NetworkBufferPool Improvement [FLINK-20533] - Add histogram support to Datadog 
reporter [FLINK-21382] - Standalone K8s documentation does not explain usage of standby JobManagers [FLINK-21521] - Pretty print K8s specifications [FLINK-21690] - remove redundant tolerableCheckpointFailureNumber setting in CheckpointConfig [FLINK-21735] - Harden JobMaster#updateTaskExecutionState() [FLINK-22051] - Better document the distinction between stop-with-savepoint and stop-with-savepoint-with-drain [FLINK-22142] - Remove console logging for Kafka connector for AZP runs [FLINK-22208] - Bump snappy-java to 1.1.5+ [FLINK-22297] - Perform early check to ensure that the length of the result is the same as the input for Pandas UDF `}),e.add({id:144,href:"/2021/04/15/stateful-functions-3.0.0-remote-functions-front-and-center/",title:"Stateful Functions 3.0.0: Remote Functions Front and Center",section:"Flink Blog",content:`The Apache Flink community is happy to announce the release of Stateful Functions (StateFun) 3.0.0! Stateful Functions is a cross-platform stack for building Stateful Serverless applications, making it radically simpler to develop scalable, consistent, and elastic distributed applications.
 This new release brings remote functions to the front and center of StateFun, making the disaggregated setup that separates the application logic from the StateFun cluster the default. It is now easier, more efficient, and more ergonomic to write applications that live in their own processes or containers. With the new Java SDK this is now also possible for all JVM languages, in addition to Python.
 Background # Starting with the first StateFun release, before the project was donated to the Apache Software Foundation, our focus was: making scalable stateful applications easy to build and run.
 The first StateFun version introduced an SDK that allowed writing stateful functions that build up a StateFun application packaged and deployed as a particular Flink job submitted to a Flink cluster. Having functions executing within the same JVM as Flink has some advantages, such as the deployment&rsquo;s performance and immutability. However, it had a few limitations:
@@ -2632,7 +2642,7 @@
 For more details, check the updated documentation and the release notes for a detailed list of changes and new features if you plan to upgrade your setup to Stateful Functions 3.0.0. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA
 List of Contributors # The Apache Flink community would like to thank all contributors that have made this release possible:
 Authuir, Chesnay Schepler, David Anderson, Dian Fu, Frans King, Galen Warren, Guillaume Vauvert, Igal Shilman, Ismaël Mejía, Kartik Khare, Konstantin Knauf, Marta Paes Moreira, Patrick Lucas, Patrick Wiener, Rafi Aroch, Robert Metzger, RocMarshal, Seth Wiesman, Siddique Ahmad, SteNicholas, Stephan Ewen, Timothy Bess, Tymur Yarosh, Tzu-Li (Gordon) Tai, Ufuk Celebi, abc863377, billyrrr, congxianqiu, danp11, hequn8128, kaibo, klion26, morsapaes, slinkydeveloper, wangchao, wangzzu, winder If you’d like to get involved, we’re always looking for new contributors.
-`}),e.add({id:144,href:"/2021/03/11/a-rundown-of-batch-execution-mode-in-the-datastream-api/",title:"A Rundown of Batch Execution Mode in the DataStream API",section:"Flink Blog",content:`Flink has been following the mantra that Batch is a Special Case of Streaming since the very early days. As the project evolved to address specific uses cases, different core APIs ended up being implemented for batch (DataSet API) and streaming execution (DataStream API), but the higher-level Table API/SQL was subsequently designed following this mantra of unification. With Flink 1.12, the community worked on bringing a similarly unified behaviour to the DataStream API, and took the first steps towards enabling efficient batch execution in the DataStream API.
+`}),e.add({id:145,href:"/2021/03/11/a-rundown-of-batch-execution-mode-in-the-datastream-api/",title:"A Rundown of Batch Execution Mode in the DataStream API",section:"Flink Blog",content:`Flink has been following the mantra that Batch is a Special Case of Streaming since the very early days. As the project evolved to address specific uses cases, different core APIs ended up being implemented for batch (DataSet API) and streaming execution (DataStream API), but the higher-level Table API/SQL was subsequently designed following this mantra of unification. With Flink 1.12, the community worked on bringing a similarly unified behaviour to the DataStream API, and took the first steps towards enabling efficient batch execution in the DataStream API.
 The idea behind making the DataStream API a unified abstraction for batch and streaming execution instead of maintaining separate APIs is two-fold:
 Reusability: efficient batch and stream processing under the same API would allow you to easily switch between both execution modes without rewriting any code. So, a job could be easily reused to process real-time and historical data.
 Operational simplicity: providing a unified API would mean using a single set of connectors, maintaining a single codebase and being able to easily implement mixed execution pipelines e.g. for use cases like backfilling.
@@ -2666,13 +2676,13 @@
 Another approach is to run the exact same program first on the bounded data. However, this time we wouldn&rsquo;t assume completeness of the job; instead, we would produce the state of all operators up to a certain point in time and store it as a savepoint. Later on, we could use the savepoint to start the application on the unbounded data.
 Lastly, to achieve feature parity with the DataSet API (Flink&rsquo;s legacy API for batch-style execution), we are looking into the topic of iterations and how to meet the different usage patterns depending on the mode. In STREAMING mode, iterations serve as a loopback edge, but we don&rsquo;t necessarily need to keep track of the iteration step. On the other hand, the iteration generation is vital for Machine Learning (ML) algorithms, which are the primary use case for iterations in BATCH mode.
 Have you tried the new BATCH execution mode in the DataStream API? How was your experience? We are happy to hear your feedback and stories!
-`}),e.add({id:145,href:"/2021/03/03/apache-flink-1.12.2-released/",title:"Apache Flink 1.12.2 Released",section:"Flink Blog",content:`The Apache Flink community released the next bugfix version of the Apache Flink 1.12 series.
+`}),e.add({id:146,href:"/2021/03/03/apache-flink-1.12.2-released/",title:"Apache Flink 1.12.2 Released",section:"Flink Blog",content:`The Apache Flink community released the next bugfix version of the Apache Flink 1.12 series.
 This release includes 83 fixes and minor improvements for Flink 1.12.1. The list below includes a detailed list of all fixes and improvements.
 We highly recommend all users to upgrade to Flink 1.12.2.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.12.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.12.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.12.2&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-21070] - Overloaded aggregate functions cause converter errors [FLINK-21486] - Add sanity check when switching from Rocks to Heap timers Bug [FLINK-12461] - Document binary compatibility situation with Scala beyond 2.12.8 [FLINK-16443] - Fix wrong fix for user-code CheckpointExceptions [FLINK-19771] - NullPointerException when accessing null array from postgres in JDBC Connector [FLINK-20309] - UnalignedCheckpointTestBase.execute is failed [FLINK-20462] - MailboxOperatorTest.testAvoidTaskStarvation [FLINK-20500] - UpsertKafkaTableITCase.testTemporalJoin test failed [FLINK-20565] - Fix typo in EXPLAIN Statements docs. [FLINK-20580] - Missing null value handling for SerializedValue&#39;s getByteArray() [FLINK-20654] - Unaligned checkpoint recovery may lead to corrupted data stream [FLINK-20663] - Managed memory may not be released in time when operators use managed memory frequently [FLINK-20675] - Asynchronous checkpoint failure would not fail the job anymore [FLINK-20680] - Fails to call var-arg function with no parameters [FLINK-20798] - Using PVC as high-availability.storageDir could not work [FLINK-20832] - Deliver bootstrap resouces ourselves for website and documentation [FLINK-20848] - Kafka consumer ID is not specified correctly in new KafkaSource [FLINK-20913] - Improve new HiveConf(jobConf, HiveConf.class) [FLINK-20921] - Fix Date/Time/Timestamp in Python DataStream [FLINK-20933] - Config Python Operator Use Managed Memory In Python DataStream [FLINK-20942] - Digest of FLOAT literals throws UnsupportedOperationException [FLINK-20944] - Launching in application mode requesting a ClusterIP rest service type results in an Exception [FLINK-20947] - Idle source doesn&#39;t work when pushing watermark into the source [FLINK-20961] - Flink throws NullPointerException for tables created from DataStream with no assigned timestamps and watermarks [FLINK-20992] - Checkpoint cleanup can kill JobMaster [FLINK-20998] - flink-raw-1.12.jar does not exist 
[FLINK-21009] - Can not disable certain options in Elasticsearch 7 connector [FLINK-21013] - Blink planner does not ingest timestamp into StreamRecord [FLINK-21024] - Dynamic properties get exposed to job&#39;s main method if user parameters are passed [FLINK-21028] - Streaming application didn&#39;t stop properly [FLINK-21030] - Broken job restart for job with disjoint graph [FLINK-21059] - KafkaSourceEnumerator does not honor consumer properties [FLINK-21069] - Configuration &quot;parallelism.default&quot; doesn&#39;t take effect for TableEnvironment#explainSql [FLINK-21071] - Snapshot branches running against flink-docker dev-master branch [FLINK-21104] - UnalignedCheckpointITCase.execute failed with &quot;IllegalStateException&quot; [FLINK-21132] - BoundedOneInput.endInput is called when taking synchronous savepoint [FLINK-21138] - KvStateServerHandler is not invoked with user code classloader [FLINK-21140] - Extract zip file dependencies before adding to PYTHONPATH [FLINK-21144] - KubernetesResourceManagerDriver#tryResetPodCreationCoolDown causes fatal error [FLINK-21155] - FileSourceTextLinesITCase.testBoundedTextFileSourceWithTaskManagerFailover does not pass [FLINK-21158] - wrong jvm metaspace and overhead size show in taskmanager metric page [FLINK-21163] - Python dependencies specified via CLI should not override the dependencies specified in configuration [FLINK-21169] - Kafka flink-connector-base dependency should be scope compile [FLINK-21208] - pyarrow exception when using window with pandas udaf [FLINK-21213] - e2e test fail with &#39;As task is already not running, no longer decline checkpoint&#39; [FLINK-21215] - Checkpoint was declined because one input stream is finished [FLINK-21216] - StreamPandasConversionTests Fails [FLINK-21225] - OverConvertRule does not consider distinct [FLINK-21226] - Reintroduce TableColumn.of for backwards compatibility [FLINK-21274] - At per-job mode, during the exit of the JobManager process, if ioExecutor exits at 
the end, the System.exit() method will not be executed. [FLINK-21277] - SQLClientSchemaRegistryITCase fails to download testcontainers/ryuk:0.3.0 [FLINK-21312] - SavepointITCase.testStopSavepointWithBoundedInputConcurrently is unstable [FLINK-21323] - Stop-with-savepoint is not supported by SourceOperatorStreamTask [FLINK-21351] - Incremental checkpoint data would be lost once a non-stop savepoint completed [FLINK-21361] - FlinkRelMdUniqueKeys matches on AbstractCatalogTable instead of CatalogTable [FLINK-21412] - pyflink DataTypes.DECIMAL is not available [FLINK-21452] - FLIP-27 sources cannot reliably downscale [FLINK-21453] - BoundedOneInput.endInput is NOT called when doing stop with savepoint WITH drain [FLINK-21490] - UnalignedCheckpointITCase fails on azure [FLINK-21492] - ActiveResourceManager swallows exception stack trace New Feature [FLINK-20359] - Support adding Owner Reference to Job Manager in native kubernetes setup Improvement [FLINK-9844] - PackagedProgram does not close URLClassLoader [FLINK-20417] - Handle &quot;Too old resource version&quot; exception in Kubernetes watch more gracefully [FLINK-20491] - Support Broadcast Operation in BATCH execution mode [FLINK-20517] - Support mixed keyed/non-keyed operations in BATCH execution mode [FLINK-20770] - Incorrect description for config option kubernetes.rest-service.exposed.type [FLINK-20907] - Table API documentation promotes deprecated syntax [FLINK-21020] - Bump Jackson to 20.10.5[.1] / 2.12.1 [FLINK-21034] - Rework jemalloc switch to use an environment variable [FLINK-21035] - Deduplicate copy_plugins_if_required calls [FLINK-21036] - Consider removing automatic configuration fo number of slots from docker [FLINK-21037] - Deduplicate configuration logic in docker entrypoint [FLINK-21042] - Fix code example in &quot;Aggregate Functions&quot; section in Table UDF page [FLINK-21048] - Refactor documentation related to switch memory allocator [FLINK-21123] - Upgrade Beanutils 1.9.x to 1.9.4 
[FLINK-21164] - Jar handlers don&#39;t cleanup temporarily extracted jars [FLINK-21210] - ApplicationClusterEntryPoints should explicitly close PackagedProgram [FLINK-21381] - Kubernetes HA documentation does not state required service account and role Task [FLINK-20529] - Publish Dockerfiles for release 1.12.0 [FLINK-20534] - Add Flink 1.12 MigrationVersion [FLINK-20536] - Update migration tests in master to cover migration from release-1.12 [FLINK-20960] - Add warning in 1.12 release notes about potential corrupt data stream with unaligned checkpoint [FLINK-21358] - Missing snapshot version compatibility for 1.12 `}),e.add({id:146,href:"/2021/02/10/how-to-natively-deploy-flink-on-kubernetes-with-high-availability-ha/",title:"How to natively deploy Flink on Kubernetes with High-Availability (HA)",section:"Flink Blog",content:`Flink has supported resource management systems like YARN and Mesos since the early days; however, these were not designed for the fast-moving cloud-native architectures that are increasingly gaining popularity these days, or the growing need to support complex, mixed workloads (e.g. batch, streaming, deep learning, web services). For these reasons, more and more users are using Kubernetes to automate the deployment, scaling and management of their Flink applications.
+Sub-task [FLINK-21070] - Overloaded aggregate functions cause converter errors [FLINK-21486] - Add sanity check when switching from Rocks to Heap timers Bug [FLINK-12461] - Document binary compatibility situation with Scala beyond 2.12.8 [FLINK-16443] - Fix wrong fix for user-code CheckpointExceptions [FLINK-19771] - NullPointerException when accessing null array from postgres in JDBC Connector [FLINK-20309] - UnalignedCheckpointTestBase.execute is failed [FLINK-20462] - MailboxOperatorTest.testAvoidTaskStarvation [FLINK-20500] - UpsertKafkaTableITCase.testTemporalJoin test failed [FLINK-20565] - Fix typo in EXPLAIN Statements docs. [FLINK-20580] - Missing null value handling for SerializedValue&#39;s getByteArray() [FLINK-20654] - Unaligned checkpoint recovery may lead to corrupted data stream [FLINK-20663] - Managed memory may not be released in time when operators use managed memory frequently [FLINK-20675] - Asynchronous checkpoint failure would not fail the job anymore [FLINK-20680] - Fails to call var-arg function with no parameters [FLINK-20798] - Using PVC as high-availability.storageDir could not work [FLINK-20832] - Deliver bootstrap resouces ourselves for website and documentation [FLINK-20848] - Kafka consumer ID is not specified correctly in new KafkaSource [FLINK-20913] - Improve new HiveConf(jobConf, HiveConf.class) [FLINK-20921] - Fix Date/Time/Timestamp in Python DataStream [FLINK-20933] - Config Python Operator Use Managed Memory In Python DataStream [FLINK-20942] - Digest of FLOAT literals throws UnsupportedOperationException [FLINK-20944] - Launching in application mode requesting a ClusterIP rest service type results in an Exception [FLINK-20947] - Idle source doesn&#39;t work when pushing watermark into the source [FLINK-20961] - Flink throws NullPointerException for tables created from DataStream with no assigned timestamps and watermarks [FLINK-20992] - Checkpoint cleanup can kill JobMaster [FLINK-20998] - flink-raw-1.12.jar does not exist 
[FLINK-21009] - Can not disable certain options in Elasticsearch 7 connector [FLINK-21013] - Blink planner does not ingest timestamp into StreamRecord [FLINK-21024] - Dynamic properties get exposed to job&#39;s main method if user parameters are passed [FLINK-21028] - Streaming application didn&#39;t stop properly [FLINK-21030] - Broken job restart for job with disjoint graph [FLINK-21059] - KafkaSourceEnumerator does not honor consumer properties [FLINK-21069] - Configuration &quot;parallelism.default&quot; doesn&#39;t take effect for TableEnvironment#explainSql [FLINK-21071] - Snapshot branches running against flink-docker dev-master branch [FLINK-21104] - UnalignedCheckpointITCase.execute failed with &quot;IllegalStateException&quot; [FLINK-21132] - BoundedOneInput.endInput is called when taking synchronous savepoint [FLINK-21138] - KvStateServerHandler is not invoked with user code classloader [FLINK-21140] - Extract zip file dependencies before adding to PYTHONPATH [FLINK-21144] - KubernetesResourceManagerDriver#tryResetPodCreationCoolDown causes fatal error [FLINK-21155] - FileSourceTextLinesITCase.testBoundedTextFileSourceWithTaskManagerFailover does not pass [FLINK-21158] - wrong jvm metaspace and overhead size show in taskmanager metric page [FLINK-21163] - Python dependencies specified via CLI should not override the dependencies specified in configuration [FLINK-21169] - Kafka flink-connector-base dependency should be scope compile [FLINK-21208] - pyarrow exception when using window with pandas udaf [FLINK-21213] - e2e test fail with &#39;As task is already not running, no longer decline checkpoint&#39; [FLINK-21215] - Checkpoint was declined because one input stream is finished [FLINK-21216] - StreamPandasConversionTests Fails [FLINK-21225] - OverConvertRule does not consider distinct [FLINK-21226] - Reintroduce TableColumn.of for backwards compatibility [FLINK-21274] - At per-job mode, during the exit of the JobManager process, if ioExecutor exits at 
the end, the System.exit() method will not be executed. [FLINK-21277] - SQLClientSchemaRegistryITCase fails to download testcontainers/ryuk:0.3.0 [FLINK-21312] - SavepointITCase.testStopSavepointWithBoundedInputConcurrently is unstable [FLINK-21323] - Stop-with-savepoint is not supported by SourceOperatorStreamTask [FLINK-21351] - Incremental checkpoint data would be lost once a non-stop savepoint completed [FLINK-21361] - FlinkRelMdUniqueKeys matches on AbstractCatalogTable instead of CatalogTable [FLINK-21412] - pyflink DataTypes.DECIMAL is not available [FLINK-21452] - FLIP-27 sources cannot reliably downscale [FLINK-21453] - BoundedOneInput.endInput is NOT called when doing stop with savepoint WITH drain [FLINK-21490] - UnalignedCheckpointITCase fails on azure [FLINK-21492] - ActiveResourceManager swallows exception stack trace New Feature [FLINK-20359] - Support adding Owner Reference to Job Manager in native kubernetes setup Improvement [FLINK-9844] - PackagedProgram does not close URLClassLoader [FLINK-20417] - Handle &quot;Too old resource version&quot; exception in Kubernetes watch more gracefully [FLINK-20491] - Support Broadcast Operation in BATCH execution mode [FLINK-20517] - Support mixed keyed/non-keyed operations in BATCH execution mode [FLINK-20770] - Incorrect description for config option kubernetes.rest-service.exposed.type [FLINK-20907] - Table API documentation promotes deprecated syntax [FLINK-21020] - Bump Jackson to 20.10.5[.1] / 2.12.1 [FLINK-21034] - Rework jemalloc switch to use an environment variable [FLINK-21035] - Deduplicate copy_plugins_if_required calls [FLINK-21036] - Consider removing automatic configuration fo number of slots from docker [FLINK-21037] - Deduplicate configuration logic in docker entrypoint [FLINK-21042] - Fix code example in &quot;Aggregate Functions&quot; section in Table UDF page [FLINK-21048] - Refactor documentation related to switch memory allocator [FLINK-21123] - Upgrade Beanutils 1.9.x to 1.9.4 
[FLINK-21164] - Jar handlers don&#39;t cleanup temporarily extracted jars [FLINK-21210] - ApplicationClusterEntryPoints should explicitly close PackagedProgram [FLINK-21381] - Kubernetes HA documentation does not state required service account and role Task [FLINK-20529] - Publish Dockerfiles for release 1.12.0 [FLINK-20534] - Add Flink 1.12 MigrationVersion [FLINK-20536] - Update migration tests in master to cover migration from release-1.12 [FLINK-20960] - Add warning in 1.12 release notes about potential corrupt data stream with unaligned checkpoint [FLINK-21358] - Missing snapshot version compatibility for 1.12 `}),e.add({id:147,href:"/2021/02/10/how-to-natively-deploy-flink-on-kubernetes-with-high-availability-ha/",title:"How to natively deploy Flink on Kubernetes with High-Availability (HA)",section:"Flink Blog",content:`Flink has supported resource management systems like YARN and Mesos since the early days; however, these were not designed for the fast-moving cloud-native architectures that are increasingly gaining popularity these days, or the growing need to support complex, mixed workloads (e.g. batch, streaming, deep learning, web services). For these reasons, more and more users are using Kubernetes to automate the deployment, scaling and management of their Flink applications.
 From release to release, the Flink community has made significant progress in integrating natively with Kubernetes, from active resource management to “Zookeeperless” High Availability (HA). In this blogpost, we&rsquo;ll recap the technical details of deploying Flink applications natively on Kubernetes, diving deeper into Flink’s Kubernetes HA architecture. We&rsquo;ll then walk you through a hands-on example of running a Flink application cluster on Kubernetes with HA enabled. We’ll end with a conclusion covering the advantages of running Flink natively on Kubernetes, and an outlook into future work.
 Native Flink on Kubernetes Integration # Before we dive into the technical details of how the Kubernetes-based HA service works, let us briefly explain what native means in the context of Flink deployments on Kubernetes:
 Flink is self-contained. There will be an embedded Kubernetes client in the Flink client, and so you will not need other external tools (e.g. kubectl, Kubernetes dashboard) to create a Flink cluster on Kubernetes.
@@ -2693,13 +2703,13 @@
 $ ./bin/flink cancel --target kubernetes-application -Dkubernetes.cluster-id=&lt;ClusterID&gt; &lt;JobID&gt; When the job is cancelled, all the Kubernetes resources created by Flink (e.g. JobManager deployment, TaskManager pods, service, Flink configuration ConfigMap, leader-related ConfigMaps) will be deleted automatically.
 Conclusion # The native Kubernetes integration was first introduced in Flink 1.10, abstracting a lot of the complexities of hosting, configuring, managing and operating Flink clusters in cloud-native environments. After three major releases, the community has made great progress in supporting multiple deployment modes (i.e. session and application) and an alternative HA setup that doesn’t depend on Zookeeper.
 Compared with standalone Kubernetes deployments, the native integration is more user-friendly and requires less upfront knowledge about Kubernetes. Given that Flink is now aware of the underlying Kubernetes cluster, it also can benefit from dynamic resource allocation and make more efficient use of Kubernetes cluster resources. The next building block to deepen Flink’s native integration with Kubernetes is the pod template (FLINK-15656), which will greatly enhance the flexibility of using advanced Kubernetes features (e.g. volumes, init container, sidecar container). This work is already in progress and will be added in the upcoming 1.13 release!
-`}),e.add({id:147,href:"/2021/01/29/apache-flink-1.10.3-released/",title:"Apache Flink 1.10.3 Released",section:"Flink Blog",content:`The Apache Flink community released the third bugfix version of the Apache Flink 1.10 series.
+`}),e.add({id:148,href:"/2021/01/29/apache-flink-1.10.3-released/",title:"Apache Flink 1.10.3 Released",section:"Flink Blog",content:`The Apache Flink community released the third bugfix version of the Apache Flink 1.10 series.
 This release includes 36 fixes and minor improvements for Flink 1.10.2. The list below includes a detailed list of all fixes and improvements.
 We highly recommend all users to upgrade to Flink 1.10.3.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.10.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.10.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.10.3&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Bug [FLINK-14087] - throws java.lang.ArrayIndexOutOfBoundsException when emiting the data using RebalancePartitioner. [FLINK-15170] - WebFrontendITCase.testCancelYarn fails on travis [FLINK-15467] - Should wait for the end of the source thread during the Task cancellation [FLINK-16246] - Exclude &quot;SdkMBeanRegistrySupport&quot; from dynamically loaded AWS connectors [FLINK-17341] - freeSlot in TaskExecutor.closeJobManagerConnection cause ConcurrentModificationException [FLINK-17458] - TaskExecutorSubmissionTest#testFailingScheduleOrUpdateConsumers [FLINK-17677] - FLINK_LOG_PREFIX recommended in docs is not always available [FLINK-18081] - Fix broken links in &quot;Kerberos Authentication Setup and Configuration&quot; doc [FLINK-18196] - flink throws \`NullPointerException\` when executeCheckpointing [FLINK-18212] - Init lookup join failed when use udf on lookup table [FLINK-18832] - BoundedBlockingSubpartition does not work with StreamTask [FLINK-18959] - Fail to archiveExecutionGraph because job is not finished when dispatcher close [FLINK-19022] - AkkaRpcActor failed to start but no exception information [FLINK-19109] - Split Reader eats chained periodic watermarks [FLINK-19135] - (Stream)ExecutionEnvironment.execute() should not throw ExecutionException [FLINK-19237] - LeaderChangeClusterComponentsTest.testReelectionOfJobMaster failed with &quot;NoResourceAvailableException: Could not allocate the required slot within slot request timeout&quot; [FLINK-19401] - Job stuck in restart loop due to excessive checkpoint recoveries which block the JobMaster [FLINK-19557] - Issue retrieving leader after zookeeper session reconnect [FLINK-19675] - The plan of is incorrect when Calc contains WHERE clause, composite fields access and Python UDF at the same time [FLINK-19901] - Unable to exclude metrics variables for the last metrics reporter. 
[FLINK-20013] - BoundedBlockingSubpartition may leak network buffer if task is failed or canceled [FLINK-20018] - pipeline.cached-files option cannot escape &#39;:&#39; in path [FLINK-20033] - Job fails when stopping JobMaster [FLINK-20065] - UnalignedCheckpointCompatibilityITCase.test failed with AskTimeoutException [FLINK-20076] - DispatcherTest.testOnRemovedJobGraphDoesNotCleanUpHAFiles does not test the desired functionality [FLINK-20183] - Fix the default PYTHONPATH is overwritten in client side [FLINK-20218] - AttributeError: module &#39;urllib&#39; has no attribute &#39;parse&#39; [FLINK-20875] - [CVE-2020-17518] Directory traversal attack: remote file writing through the REST API Improvement [FLINK-16753] - Exception from AsyncCheckpointRunnable should be wrapped in CheckpointException [FLINK-18287] - Correct the documentation of Python Table API in SQL pages [FLINK-19055] - MemoryManagerSharedResourcesTest contains three tests running extraordinary long [FLINK-19105] - Table API Sample Code Error [FLINK-19252] - Jaas file created under io.tmp.dirs - folder not created if not exists [FLINK-19339] - Support Avro&#39;s unions with logical types [FLINK-19523] - Hide sensitive command-line configurations Task [FLINK-20906] - Update copyright year to 2021 for NOTICE files `}),e.add({id:148,href:"/2021/01/19/apache-flink-1.12.1-released/",title:"Apache Flink 1.12.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix version of the Apache Flink 1.12 series.
+Bug [FLINK-14087] - throws java.lang.ArrayIndexOutOfBoundsException when emiting the data using RebalancePartitioner. [FLINK-15170] - WebFrontendITCase.testCancelYarn fails on travis [FLINK-15467] - Should wait for the end of the source thread during the Task cancellation [FLINK-16246] - Exclude &quot;SdkMBeanRegistrySupport&quot; from dynamically loaded AWS connectors [FLINK-17341] - freeSlot in TaskExecutor.closeJobManagerConnection cause ConcurrentModificationException [FLINK-17458] - TaskExecutorSubmissionTest#testFailingScheduleOrUpdateConsumers [FLINK-17677] - FLINK_LOG_PREFIX recommended in docs is not always available [FLINK-18081] - Fix broken links in &quot;Kerberos Authentication Setup and Configuration&quot; doc [FLINK-18196] - flink throws \`NullPointerException\` when executeCheckpointing [FLINK-18212] - Init lookup join failed when use udf on lookup table [FLINK-18832] - BoundedBlockingSubpartition does not work with StreamTask [FLINK-18959] - Fail to archiveExecutionGraph because job is not finished when dispatcher close [FLINK-19022] - AkkaRpcActor failed to start but no exception information [FLINK-19109] - Split Reader eats chained periodic watermarks [FLINK-19135] - (Stream)ExecutionEnvironment.execute() should not throw ExecutionException [FLINK-19237] - LeaderChangeClusterComponentsTest.testReelectionOfJobMaster failed with &quot;NoResourceAvailableException: Could not allocate the required slot within slot request timeout&quot; [FLINK-19401] - Job stuck in restart loop due to excessive checkpoint recoveries which block the JobMaster [FLINK-19557] - Issue retrieving leader after zookeeper session reconnect [FLINK-19675] - The plan of is incorrect when Calc contains WHERE clause, composite fields access and Python UDF at the same time [FLINK-19901] - Unable to exclude metrics variables for the last metrics reporter. 
[FLINK-20013] - BoundedBlockingSubpartition may leak network buffer if task is failed or canceled [FLINK-20018] - pipeline.cached-files option cannot escape &#39;:&#39; in path [FLINK-20033] - Job fails when stopping JobMaster [FLINK-20065] - UnalignedCheckpointCompatibilityITCase.test failed with AskTimeoutException [FLINK-20076] - DispatcherTest.testOnRemovedJobGraphDoesNotCleanUpHAFiles does not test the desired functionality [FLINK-20183] - Fix the default PYTHONPATH is overwritten in client side [FLINK-20218] - AttributeError: module &#39;urllib&#39; has no attribute &#39;parse&#39; [FLINK-20875] - [CVE-2020-17518] Directory traversal attack: remote file writing through the REST API Improvement [FLINK-16753] - Exception from AsyncCheckpointRunnable should be wrapped in CheckpointException [FLINK-18287] - Correct the documentation of Python Table API in SQL pages [FLINK-19055] - MemoryManagerSharedResourcesTest contains three tests running extraordinary long [FLINK-19105] - Table API Sample Code Error [FLINK-19252] - Jaas file created under io.tmp.dirs - folder not created if not exists [FLINK-19339] - Support Avro&#39;s unions with logical types [FLINK-19523] - Hide sensitive command-line configurations Task [FLINK-20906] - Update copyright year to 2021 for NOTICE files `}),e.add({id:149,href:"/2021/01/19/apache-flink-1.12.1-released/",title:"Apache Flink 1.12.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix version of the Apache Flink 1.12 series.
 This release includes 79 fixes and minor improvements for Flink 1.12.0. The list below includes a detailed list of all fixes and improvements.
 We highly recommend all users to upgrade to Flink 1.12.1.
 Attention: Using unaligned checkpoints in Flink 1.12.0 combined with two/multiple inputs tasks or with union inputs for single input tasks can result in corrupted state. This can happen if a new checkpoint is triggered before recovery is fully completed. For state to be corrupted a task with two or more input gates must receive a checkpoint barrier exactly at the same time this tasks finishes recovering spilled in-flight data. In such case this new checkpoint can succeed, with corrupted/missing in-flight data, which will result in various deserialisation/corrupted data stream errors when someone attempts to recover from such corrupted checkpoint.
@@ -2708,7 +2718,7 @@
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.12.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.12.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.12.1&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-18897] - Add documentation for the maxwell-json format [FLINK-20352] - Rework command line interface documentation page [FLINK-20353] - Rework logging documentation page [FLINK-20354] - Rework standalone deployment documentation page [FLINK-20355] - Rework K8s deployment documentation page [FLINK-20356] - Rework Mesos deployment documentation page [FLINK-20422] - Remove from .html files in flink documentation [FLINK-20485] - Map views are deserialized multiple times [FLINK-20601] - Rework PyFlink CLI documentation Bug [FLINK-19369] - BlobClientTest.testGetFailsDuringStreamingForJobPermanentBlob hangs [FLINK-19435] - Deadlock when loading different driver classes concurrently using Class.forName [FLINK-19725] - Logger cannot be initialized due to timeout: LoggerInitializationException is thrown [FLINK-19880] - Fix ignore-parse-errors not work for the legacy JSON format [FLINK-20213] - Partition commit is delayed when records keep coming [FLINK-20221] - DelimitedInputFormat does not restore compressed filesplits correctly leading to dataloss [FLINK-20273] - Fix Table api Kafka connector Sink Partitioner Document Error [FLINK-20372] - Update Kafka SQL connector page to mention properties.* options [FLINK-20389] - UnalignedCheckpointITCase failure caused by NullPointerException [FLINK-20404] - ZooKeeper quorum fails to start due to missing log4j library [FLINK-20419] - Insert fails due to failure to generate execution plan [FLINK-20428] - ZooKeeperLeaderElectionConnectionHandlingTest.testConnectionSuspendedHandlingDuringInitialization failed with &quot;No result is expected since there was no leader elected before stopping the server, yet&quot; [FLINK-20429] - KafkaTableITCase.testKafkaTemporalJoinChangelog failed with unexpected results [FLINK-20433] - UnalignedCheckpointTestBase.execute failed with &quot;TestTimedOutException: test timed out after 300 seconds&quot; [FLINK-20464] - Some Table examples are not built correctly [FLINK-20467] - Fix the 
Example in Python DataStream Doc [FLINK-20470] - MissingNode can&#39;t be casted to ObjectNode when deserializing JSON [FLINK-20476] - New File Sink end-to-end test Failed [FLINK-20486] - Hive temporal join should allow monitor interval smaller than 1 hour [FLINK-20492] - The SourceOperatorStreamTask should implement cancelTask() and finishTask() [FLINK-20493] - SQLClientSchemaRegistryITCase failed with &quot;Could not build the flink-dist image&quot; [FLINK-20521] - Null result values are being swallowed by RPC system [FLINK-20525] - StreamArrowPythonGroupWindowAggregateFunctionOperator doesn&#39;t handle rowtime and proctime properly [FLINK-20543] - Fix typo in upsert kafka docs [FLINK-20554] - The Checkpointed Data Size of the Latest Completed Checkpoint is incorrectly displayed on the Overview page of the UI [FLINK-20582] - Fix typos in \`CREATE Statements\` docs [FLINK-20607] - a wrong example in udfs page. [FLINK-20615] - Local recovery and sticky scheduling end-to-end test timeout with &quot;IOException: Stream Closed&quot; [FLINK-20626] - Canceling a job when it is failing will result in job hanging in CANCELING state [FLINK-20630] - [Kinesis][DynamoDB] DynamoDB Streams Consumer fails to consume from Latest [FLINK-20646] - ReduceTransformation does not work with RocksDBStateBackend [FLINK-20648] - Unable to restore job from savepoint when using Kubernetes based HA services [FLINK-20664] - Support setting service account for TaskManager pod [FLINK-20665] - FileNotFoundException when restore from latest Checkpoint [FLINK-20666] - Fix the deserialized Row losing the field_name information in PyFlink [FLINK-20669] - Add the jzlib LICENSE file in flink-python module [FLINK-20703] - HiveSinkCompactionITCase test timeout [FLINK-20704] - Some rel data type does not implement the digest correctly [FLINK-20756] - PythonCalcSplitConditionRule is not working as expected [FLINK-20764] - BatchGroupedReduceOperator does not emit results for singleton inputs [FLINK-20781] 
- UnalignedCheckpointITCase failure caused by NullPointerException [FLINK-20784] - .staging_xxx does not exist, when insert into hive [FLINK-20793] - Fix NamesTest due to code style refactor [FLINK-20803] - Version mismatch between spotless-maven-plugin and google-java-format plugin [FLINK-20841] - Fix compile error due to duplicated generated files Improvement [FLINK-19013] - Log start/end of state restoration [FLINK-19259] - Use classloader release hooks with Kinesis producer to avoid metaspace leak [FLINK-19832] - Improve handling of immediately failed physical slot in SlotSharingExecutionSlotAllocator [FLINK-20055] - Datadog API Key exposed in Flink JobManager logs [FLINK-20168] - Translate page &#39;Flink Architecture&#39; into Chinese [FLINK-20209] - Add missing checkpoint configuration to Flink UI [FLINK-20298] - Replace usage of in flink documentation [FLINK-20468] - Enable leadership control in MiniCluster to test JM failover [FLINK-20510] - Enable log4j2 monitor interval by default [FLINK-20519] - Extend HBase notice with transitively bundled dependencies [FLINK-20570] - The \`NOTE\` tip style is different from the others in process_function page. 
[FLINK-20588] - Add docker-compose as appendix to Mesos documentation [FLINK-20629] - [Kinesis][EFO] Migrate from DescribeStream to DescribeStreamSummary [FLINK-20647] - Use yield to generate output datas in ProcessFunction for Python DataStream [FLINK-20650] - Mark &quot;native-k8s&quot; as deprecated in docker-entrypoint.sh [FLINK-20651] - Use Spotless/google-java-format for code formatting/enforcement [FLINK-20682] - Add configuration options related to hadoop [FLINK-20697] - Correct the Type of &quot;lookup.cache.ttl&quot; in jdbc.md/jdbc.zh.md [FLINK-20790] - Generated classes should not be put under src/ directory [FLINK-20792] - Allow shorthand invocation of spotless [FLINK-20805] - Blink runtime classes partially ignored by spotless [FLINK-20822] - Don&#39;t check whether a function is generic in hive catalog [FLINK-20866] - Add how to list jobs in Yarn deployment documentation when HA enabled Task [FLINK-20300] - Create Flink 1.12 release notes [FLINK-20906] - Update copyright year to 2021 for NOTICE files `}),e.add({id:149,href:"/2021/01/18/using-rocksdb-state-backend-in-apache-flink-when-and-how/",title:"Using RocksDB State Backend in Apache Flink: When and How",section:"Flink Blog",content:`Stream processing applications are often stateful, “remembering” information from processed events and using it to influence further event processing. In Flink, the remembered information, i.e., state, is stored locally in the configured state backend. To prevent data loss in case of failures, the state backend periodically persists a snapshot of its contents to a pre-configured durable storage. The RocksDB state backend (i.e., RocksDBStateBackend) is one of the three built-in state backends in Flink. This blog post will guide you through the benefits of using RocksDB to manage your application’s state, explain when and how to use it and also clear up a few common misconceptions. 
Having said that, this is not a blog post to explain how RocksDB works in-depth or how to do advanced troubleshooting and performance tuning; if you need help with any of those topics, you can reach out to the Flink User Mailing List.
+Sub-task [FLINK-18897] - Add documentation for the maxwell-json format [FLINK-20352] - Rework command line interface documentation page [FLINK-20353] - Rework logging documentation page [FLINK-20354] - Rework standalone deployment documentation page [FLINK-20355] - Rework K8s deployment documentation page [FLINK-20356] - Rework Mesos deployment documentation page [FLINK-20422] - Remove from .html files in flink documentation [FLINK-20485] - Map views are deserialized multiple times [FLINK-20601] - Rework PyFlink CLI documentation Bug [FLINK-19369] - BlobClientTest.testGetFailsDuringStreamingForJobPermanentBlob hangs [FLINK-19435] - Deadlock when loading different driver classes concurrently using Class.forName [FLINK-19725] - Logger cannot be initialized due to timeout: LoggerInitializationException is thrown [FLINK-19880] - Fix ignore-parse-errors not work for the legacy JSON format [FLINK-20213] - Partition commit is delayed when records keep coming [FLINK-20221] - DelimitedInputFormat does not restore compressed filesplits correctly leading to dataloss [FLINK-20273] - Fix Table api Kafka connector Sink Partitioner Document Error [FLINK-20372] - Update Kafka SQL connector page to mention properties.* options [FLINK-20389] - UnalignedCheckpointITCase failure caused by NullPointerException [FLINK-20404] - ZooKeeper quorum fails to start due to missing log4j library [FLINK-20419] - Insert fails due to failure to generate execution plan [FLINK-20428] - ZooKeeperLeaderElectionConnectionHandlingTest.testConnectionSuspendedHandlingDuringInitialization failed with &quot;No result is expected since there was no leader elected before stopping the server, yet&quot; [FLINK-20429] - KafkaTableITCase.testKafkaTemporalJoinChangelog failed with unexpected results [FLINK-20433] - UnalignedCheckpointTestBase.execute failed with &quot;TestTimedOutException: test timed out after 300 seconds&quot; [FLINK-20464] - Some Table examples are not built correctly [FLINK-20467] - Fix the 
Example in Python DataStream Doc [FLINK-20470] - MissingNode can&#39;t be casted to ObjectNode when deserializing JSON [FLINK-20476] - New File Sink end-to-end test Failed [FLINK-20486] - Hive temporal join should allow monitor interval smaller than 1 hour [FLINK-20492] - The SourceOperatorStreamTask should implement cancelTask() and finishTask() [FLINK-20493] - SQLClientSchemaRegistryITCase failed with &quot;Could not build the flink-dist image&quot; [FLINK-20521] - Null result values are being swallowed by RPC system [FLINK-20525] - StreamArrowPythonGroupWindowAggregateFunctionOperator doesn&#39;t handle rowtime and proctime properly [FLINK-20543] - Fix typo in upsert kafka docs [FLINK-20554] - The Checkpointed Data Size of the Latest Completed Checkpoint is incorrectly displayed on the Overview page of the UI [FLINK-20582] - Fix typos in \`CREATE Statements\` docs [FLINK-20607] - a wrong example in udfs page. [FLINK-20615] - Local recovery and sticky scheduling end-to-end test timeout with &quot;IOException: Stream Closed&quot; [FLINK-20626] - Canceling a job when it is failing will result in job hanging in CANCELING state [FLINK-20630] - [Kinesis][DynamoDB] DynamoDB Streams Consumer fails to consume from Latest [FLINK-20646] - ReduceTransformation does not work with RocksDBStateBackend [FLINK-20648] - Unable to restore job from savepoint when using Kubernetes based HA services [FLINK-20664] - Support setting service account for TaskManager pod [FLINK-20665] - FileNotFoundException when restore from latest Checkpoint [FLINK-20666] - Fix the deserialized Row losing the field_name information in PyFlink [FLINK-20669] - Add the jzlib LICENSE file in flink-python module [FLINK-20703] - HiveSinkCompactionITCase test timeout [FLINK-20704] - Some rel data type does not implement the digest correctly [FLINK-20756] - PythonCalcSplitConditionRule is not working as expected [FLINK-20764] - BatchGroupedReduceOperator does not emit results for singleton inputs [FLINK-20781] 
- UnalignedCheckpointITCase failure caused by NullPointerException [FLINK-20784] - .staging_xxx does not exist, when insert into hive [FLINK-20793] - Fix NamesTest due to code style refactor [FLINK-20803] - Version mismatch between spotless-maven-plugin and google-java-format plugin [FLINK-20841] - Fix compile error due to duplicated generated files Improvement [FLINK-19013] - Log start/end of state restoration [FLINK-19259] - Use classloader release hooks with Kinesis producer to avoid metaspace leak [FLINK-19832] - Improve handling of immediately failed physical slot in SlotSharingExecutionSlotAllocator [FLINK-20055] - Datadog API Key exposed in Flink JobManager logs [FLINK-20168] - Translate page &#39;Flink Architecture&#39; into Chinese [FLINK-20209] - Add missing checkpoint configuration to Flink UI [FLINK-20298] - Replace usage of in flink documentation [FLINK-20468] - Enable leadership control in MiniCluster to test JM failover [FLINK-20510] - Enable log4j2 monitor interval by default [FLINK-20519] - Extend HBase notice with transitively bundled dependencies [FLINK-20570] - The \`NOTE\` tip style is different from the others in process_function page. 
[FLINK-20588] - Add docker-compose as appendix to Mesos documentation [FLINK-20629] - [Kinesis][EFO] Migrate from DescribeStream to DescribeStreamSummary [FLINK-20647] - Use yield to generate output datas in ProcessFunction for Python DataStream [FLINK-20650] - Mark &quot;native-k8s&quot; as deprecated in docker-entrypoint.sh [FLINK-20651] - Use Spotless/google-java-format for code formatting/enforcement [FLINK-20682] - Add configuration options related to hadoop [FLINK-20697] - Correct the Type of &quot;lookup.cache.ttl&quot; in jdbc.md/jdbc.zh.md [FLINK-20790] - Generated classes should not be put under src/ directory [FLINK-20792] - Allow shorthand invocation of spotless [FLINK-20805] - Blink runtime classes partially ignored by spotless [FLINK-20822] - Don&#39;t check whether a function is generic in hive catalog [FLINK-20866] - Add how to list jobs in Yarn deployment documentation when HA enabled Task [FLINK-20300] - Create Flink 1.12 release notes [FLINK-20906] - Update copyright year to 2021 for NOTICE files `}),e.add({id:150,href:"/2021/01/18/using-rocksdb-state-backend-in-apache-flink-when-and-how/",title:"Using RocksDB State Backend in Apache Flink: When and How",section:"Flink Blog",content:`Stream processing applications are often stateful, “remembering” information from processed events and using it to influence further event processing. In Flink, the remembered information, i.e., state, is stored locally in the configured state backend. To prevent data loss in case of failures, the state backend periodically persists a snapshot of its contents to a pre-configured durable storage. The RocksDB state backend (i.e., RocksDBStateBackend) is one of the three built-in state backends in Flink. This blog post will guide you through the benefits of using RocksDB to manage your application’s state, explain when and how to use it and also clear up a few common misconceptions. 
Having said that, this is not a blog post to explain how RocksDB works in-depth or how to do advanced troubleshooting and performance tuning; if you need help with any of those topics, you can reach out to the Flink User Mailing List.
 State in Flink # To best understand state and state backends in Flink, it’s important to distinguish between in-flight state and state snapshots. In-flight state, also known as working state, is the state a Flink job is working on. It is always stored locally in memory (with the possibility to spill to disk) and can be lost when jobs fail without impacting job recoverability. State snapshots, i.e., checkpoints and savepoints, are stored in a remote durable storage, and are used to restore the local state in the case of job failures. The appropriate state backend for a production deployment depends on scalability, throughput, and latency requirements.
 What is RocksDB? # Thinking of RocksDB as a distributed database that needs to run on a cluster and to be managed by specialized administrators is a common misconception. RocksDB is an embeddable persistent key-value store for fast storage. It interacts with Flink via the Java Native Interface (JNI). The picture below shows where RocksDB fits in a Flink cluster node. Details are explained in the following sections.
 RocksDB in Flink # Everything you need to use RocksDB as a state backend is bundled in the Apache Flink distribution, including the native shared library:
@@ -2729,7 +2739,7 @@
 While data is being written or overwritten in RocksDB, flushing from memory to local disks and data compaction are managed in the background by RocksDB threads. On a machine with many CPU cores, you should increase the parallelism of background flushing and compaction by setting the Flink configuration state.backend.rocksdb.thread.num (corresponding to max_background_jobs in RocksDB). The default configuration is usually too small for a production setup. If your job reads frequently from RocksDB, you should consider enabling bloom filters.
 For other RocksDBStateBackend configurations, check the Flink documentation on Advanced RocksDB State Backends Options. For further tuning, check RocksDB Tuning Guide in RocksDB Wiki.
 Conclusion # The RocksDB state backend (i.e., RocksDBStateBackend) is one of the three state backends bundled in Flink, and can be a powerful choice when configuring your streaming applications. It enables scalable applications maintaining up to many terabytes of state with exactly-once processing guarantees. If the state of your Flink job is too large to fit on the JVM heap, you are interested in incremental checkpointing, or you expect to have predictable latency, you should use RocksDBStateBackend. Since RocksDB is embedded in TaskManager processes as native threads and works with files on local disks, RocksDBStateBackend is supported out-of-the-box without the need to further setup and manage any external systems or processes.
-`}),e.add({id:150,href:"/2021/01/11/exploring-fine-grained-recovery-of-bounded-data-sets-on-flink/",title:"Exploring fine-grained recovery of bounded data sets on Flink",section:"Flink Blog",content:`Apache Flink is a very versatile tool for all kinds of data processing workloads. It can process incoming data within a few milliseconds or crunch through petabytes of bounded datasets (also known as batch processing).
+`}),e.add({id:151,href:"/2021/01/11/exploring-fine-grained-recovery-of-bounded-data-sets-on-flink/",title:"Exploring fine-grained recovery of bounded data sets on Flink",section:"Flink Blog",content:`Apache Flink is a very versatile tool for all kinds of data processing workloads. It can process incoming data within a few milliseconds or crunch through petabytes of bounded datasets (also known as batch processing).
 Processing efficiency is not the only parameter users of data processing systems care about. In the real world, system outages due to hardware or software failure are expected to happen all the time. For unbounded (or streaming) workloads, Flink is using periodic checkpoints to allow for reliable and correct recovery. In case of bounded data sets, having a reliable recovery mechanism is mission critical — as users do not want to potentially lose many hours of intermediate processing results.
 Apache Flink 1.9 introduced fine-grained recovery into its internal workload scheduler. The Flink APIs that are made for bounded workloads benefit from this change by individually recovering failed operators, re-using results from the previous processing step.
 In this blog post, we are going to give an overview over these changes, and we will experimentally validate their effectiveness.
@@ -2761,7 +2771,7 @@
 In general, we recommend conducting your own performance experiments on your own hardware and with your own workloads, as results might vary from what we’ve presented here. Despite the findings here, the pipelined mode probably has some performance advantages in environments with rare failures and slower I/O (for example when using spinning disks, or network attached disks). On the other hand, CPU intensive workloads might benefit from the batch mode even in slow I/O environments.
 We should also note that the caching (and subsequent reprocessing on failure) only works if the cached results are still present &ndash; this is currently only the case, if the TaskManager survives a failure. However, this is an unrealistic assumption as many failures would cause the TaskManager process to die. To mitigate this limitation, data processing frameworks employ external shuffle services that persist the cached results independent of the data processing framework. Since Flink 1.9, there is support for a pluggable shuffle service, and there are tickets for adding implementations for YARN (FLINK-13247) and Kubernetes (FLINK-13246). Once these implementations are added, TaskManagers can recover cached results even if the process or machine got killed.
 Despite these considerations, we believe that fine-grained recovery is a great improvement for Flink’s batch capabilities, as it makes the framework much more efficient, even in unstable environments.
-`}),e.add({id:151,href:"/2021/01/07/whats-new-in-the-pulsar-flink-connector-2.7.0/",title:"What's New in the Pulsar Flink Connector 2.7.0",section:"Flink Blog",content:` About the Pulsar Flink Connector # In order for companies to access real-time data insights, they need unified batch and streaming capabilities. Apache Flink unifies batch and stream processing into one single computing engine with “streams” as the unified data representation. Although developers have done extensive work at the computing and API layers, very little work has been done at the data messaging and storage layers. In reality, data is segregated into data silos, created by various storage and messaging technologies. As a result, there is still no single source-of-truth and the overall operation for the developer teams poses significant challenges. To address such operational challenges, we need to store data in streams. Apache Pulsar (together with Apache BookKeeper) perfectly meets the criteria: data is stored as one copy (source-of-truth) and can be accessed in streams (via pub-sub interfaces) and segments (for batch processing). When Flink and Pulsar come together, the two open source technologies create a unified data architecture for real-time, data-driven businesses.
+`}),e.add({id:152,href:"/2021/01/07/whats-new-in-the-pulsar-flink-connector-2.7.0/",title:"What's New in the Pulsar Flink Connector 2.7.0",section:"Flink Blog",content:` About the Pulsar Flink Connector # In order for companies to access real-time data insights, they need unified batch and streaming capabilities. Apache Flink unifies batch and stream processing into one single computing engine with “streams” as the unified data representation. Although developers have done extensive work at the computing and API layers, very little work has been done at the data messaging and storage layers. In reality, data is segregated into data silos, created by various storage and messaging technologies. As a result, there is still no single source-of-truth and the overall operation for the developer teams poses significant challenges. To address such operational challenges, we need to store data in streams. Apache Pulsar (together with Apache BookKeeper) perfectly meets the criteria: data is stored as one copy (source-of-truth) and can be accessed in streams (via pub-sub interfaces) and segments (for batch processing). When Flink and Pulsar come together, the two open source technologies create a unified data architecture for real-time, data-driven businesses.
 The Pulsar Flink connector provides elastic data processing with Apache Pulsar and Apache Flink, allowing Apache Flink to read/write data from/to Apache Pulsar. The Pulsar Flink Connector enables you to concentrate on your business logic without worrying about the storage details.
 Challenges # When we first developed the Pulsar Flink Connector, it received wide adoption from both the Flink and Pulsar communities. Leveraging the Pulsar Flink connector, Hewlett Packard Enterprise (HPE) built a real-time computing platform, BIGO built a real-time message processing system, and Zhihu is in the process of assessing the Connector’s fit for a real-time computing system.
 With more users adopting the Pulsar Flink Connector, it became clear that one of the common issues was evolving around data formats and specifically performing serialization and deserialization. While the Pulsar Flink connector leverages the Pulsar serialization, the previous connector versions did not support the Flink data format. As a result, users had to manually configure their setup in order to use the connector for real-time computing scenarios.
@@ -2795,12 +2805,12 @@
 The batch and stream solution based on the new Flink Source API is divided into two simple parts: SplitEnumerator and Reader. SplitEnumerator discovers and assigns partitions, and Reader reads data from the partition.
 Apache Pulsar stores messages in the ledger block for users to locate the ledgers through Pulsar admin, and then provide broker partition, BookKeeper partition, Offloader partition, and other information through different partitioning policies. For more details, you can refer here.
 Conclusion # The latest version of the Pulsar Flink Connector is now available and we encourage everyone to use/upgrade to the Pulsar Flink Connector 2.7.0. The new version provides significant user enhancements, enabled by various features in Pulsar 2.7 and Flink 1.12. We will be contributing the Pulsar Flink Connector 2.7.0 to the Apache Flink repository soon. If you have any questions or concerns about the Pulsar Flink Connector, feel free to open issues in this repository.
-`}),e.add({id:152,href:"/2021/01/02/stateful-functions-2.2.2-release-announcement/",title:"Stateful Functions 2.2.2 Release Announcement",section:"Flink Blog",content:`The Apache Flink community released the second bugfix release of the Stateful Functions (StateFun) 2.2 series, version 2.2.2.
+`}),e.add({id:153,href:"/2021/01/02/stateful-functions-2.2.2-release-announcement/",title:"Stateful Functions 2.2.2 Release Announcement",section:"Flink Blog",content:`The Apache Flink community released the second bugfix release of the Stateful Functions (StateFun) 2.2 series, version 2.2.2.
 The most important change of this bugfix release is upgrading Apache Flink to version 1.11.3. In addition to many stability fixes to the Flink runtime itself, this also allows StateFun applications to safely use savepoints to upgrade from older versions earlier than StateFun 2.2.1. Previously, restoring from savepoints could have failed under certain conditions.
 We strongly recommend all users to upgrade to 2.2.2.
 You can find the binaries on the updated Downloads page.
 This release includes 5 fixes and minor improvements since StateFun 2.2.1. Below is a detailed list of all fixes and improvements:
-Improvement [FLINK-20699] - Feedback invocation_id must not be constant. Task [FLINK-20161] - Consider switching from Travis CI to Github Actions for flink-statefun&#39;s CI workflows [FLINK-20189] - Restored feedback events may be silently dropped if per key-group header bytes were not fully read [FLINK-20636] - Require unaligned checkpoints to be disabled in StateFun applications [FLINK-20689] - Upgrade StateFun to Flink 1.11.3 `}),e.add({id:153,href:"/2020/12/18/apache-flink-1.11.3-released/",title:"Apache Flink 1.11.3 Released",section:"Flink Blog",content:"The Apache Flink community released the third bugfix version of the Apache Flink 1.11 series.\nThis release includes 151 fixes and minor improvements for Flink 1.11.2. The list below includes a detailed list of all fixes and improvements.\nWe highly recommend all users to upgrade to Flink 1.11.3.\nUpdated Maven dependencies:\n&lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.11.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.11.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.11.3&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.\nList of resolved issues:\nSub-task [FLINK-17393] - Improve the `FutureCompletingBlockingQueue` to wakeup blocking put() more elegantly. 
[FLINK-18604] - HBase ConnectorDescriptor can not work in Table API [FLINK-18673] - Calling ROW() in a UDF results in UnsupportedOperationException [FLINK-18680] - Improve RecordsWithSplitIds API [FLINK-18916] - Add a &quot;Operations&quot; link(linked to dev/table/tableApi.md) under the &quot;Python API&quot; -&gt; &quot;User Guide&quot; -&gt; &quot;Table API&quot; section [FLINK-18918] - Add a &quot;Connectors&quot; document under the &quot;Python API&quot; -&gt; &quot;User Guide&quot; -&gt; &quot;Table API&quot; section [FLINK-18922] - Add a &quot;Catalogs&quot; link (linked to dev/table/catalogs.md) under the &quot;Python API&quot; -&gt; &quot;User Guide&quot; -&gt; &quot;Table API&quot; section [FLINK-18926] - Add a &quot;Environment Variables&quot; document under the &quot;Python API&quot; -&gt; &quot;User Guide&quot; -&gt; &quot;Table API&quot; section [FLINK-19162] - Allow Split Reader based sources to reuse record batches [FLINK-19205] - SourceReaderContext should give access to Configuration and Hostbame [FLINK-20397] - Pass checkpointId to OperatorCoordinator.resetToCheckpoint(). 
Bug [FLINK-9992] - FsStorageLocationReferenceTest#testEncodeAndDecode failed in CI [FLINK-13733] - FlinkKafkaInternalProducerITCase.testHappyPath fails on Travis [FLINK-15170] - WebFrontendITCase.testCancelYarn fails on travis [FLINK-16246] - Exclude &quot;SdkMBeanRegistrySupport&quot; from dynamically loaded AWS connectors [FLINK-16268] - Failed to run rank over window with Hive built-in functions [FLINK-16768] - HadoopS3RecoverableWriterITCase.testRecoverWithStateWithMultiPart hangs [FLINK-17341] - freeSlot in TaskExecutor.closeJobManagerConnection cause ConcurrentModificationException [FLINK-17458] - TaskExecutorSubmissionTest#testFailingScheduleOrUpdateConsumers [FLINK-17677] - FLINK_LOG_PREFIX recommended in docs is not always available [FLINK-17825] - HA end-to-end gets killed due to timeout [FLINK-18128] - CoordinatedSourceITCase.testMultipleSources gets stuck [FLINK-18196] - flink throws `NullPointerException` when executeCheckpointing [FLINK-18222] - &quot;Avro Confluent Schema Registry nightly end-to-end test&quot; unstable with &quot;Kafka cluster did not start after 120 seconds&quot; [FLINK-18815] - AbstractCloseableRegistryTest.testClose unstable [FLINK-18818] - HadoopRenameCommitterHDFSTest.testCommitOneFile[Override: false] failed with &quot;java.io.IOException: The stream is closed&quot; [FLINK-18836] - Python UDTF doesn&#39;t work well when the return type isn&#39;t generator [FLINK-18915] - FIXED_PATH(dummy Hadoop Path) with WriterImpl may cause ORC writer OOM [FLINK-19022] - AkkaRpcActor failed to start but no exception information [FLINK-19121] - Avoid accessing HDFS frequently in HiveBulkWriterFactory [FLINK-19135] - (Stream)ExecutionEnvironment.execute() should not throw ExecutionException [FLINK-19138] - Python UDF supports directly specifying input_types as DataTypes.ROW [FLINK-19140] - Join with Table Function (UDTF) SQL example is incorrect [FLINK-19151] - Flink does not normalize container resource with correct configurations when Yarn 
FairScheduler is used [FLINK-19154] - Application mode deletes HA data in case of suspended ZooKeeper connection [FLINK-19170] - Parameter naming error [FLINK-19201] - PyFlink e2e tests is instable and failed with &quot;Connection broken: OSError&quot; [FLINK-19227] - The catalog is still created after opening failed in catalog registering [FLINK-19237] - LeaderChangeClusterComponentsTest.testReelectionOfJobMaster failed with &quot;NoResourceAvailableException: Could not allocate the required slot within slot request timeout&quot; [FLINK-19244] - CSV format can&#39;t deserialize null ROW field [FLINK-19250] - SplitFetcherManager does not propagate errors correctly [FLINK-19253] - SourceReaderTestBase.testAddSplitToExistingFetcher hangs [FLINK-19258] - Fix the wrong example of &quot;csv.line-delimiter&quot; in CSV documentation [FLINK-19280] - The option &quot;sink.buffer-flush.max-rows&quot; for JDBC can&#39;t be disabled by set to zero [FLINK-19281] - LIKE cannot recognize full table path [FLINK-19291] - Fix exception for AvroSchemaConverter#convertToSchema when RowType contains multiple row fields [FLINK-19295] - YARNSessionFIFOITCase.checkForProhibitedLogContents found a log with prohibited string [FLINK-19300] - Timer loss after restoring from savepoint [FLINK-19321] - CollectSinkFunction does not define serialVersionUID [FLINK-19338] - New source interface cannot unregister unregistered source [FLINK-19361] - Make HiveCatalog thread safe [FLINK-19398] - Hive connector fails with IllegalAccessError if submitted as usercode [FLINK-19401] - Job stuck in restart loop due to excessive checkpoint recoveries which block the JobMaster [FLINK-19423] - Fix ArrayIndexOutOfBoundsException when executing DELETE statement in JDBC upsert sink [FLINK-19433] - An Error example of FROM_UNIXTIME function in document [FLINK-19448] - CoordinatedSourceITCase.testEnumeratorReaderCommunication hangs [FLINK-19535] - SourceCoordinator should avoid fail job multiple times. 
[FLINK-19557] - Issue retrieving leader after zookeeper session reconnect [FLINK-19585] - UnalignedCheckpointCompatibilityITCase.test:97-&gt;runAndTakeSavepoint: &quot;Not all required tasks are currently running.&quot; [FLINK-19587] - Error result when casting binary type as varchar [FLINK-19618] - Broken link in docs [FLINK-19629] - Fix NullPointException when deserializing map field with null value for Avro format [FLINK-19675] - The plan of is incorrect when Calc contains WHERE clause, composite fields access and Python UDF at the same time [FLINK-19695] - Writing Table with RowTime Column of type TIMESTAMP(3) to Kafka fails with ClassCastException [FLINK-19717] - SourceReaderBase.pollNext may return END_OF_INPUT if SplitReader.fetch throws [FLINK-19740] - Error in to_pandas for table containing event time: class java.time.LocalDateTime cannot be cast to class java.sql.Timestamp [FLINK-19741] - InternalTimeServiceManager fails to restore due to corrupt reads if there are other users of raw keyed state streams [FLINK-19748] - KeyGroupRangeOffsets#KeyGroupOffsetsIterator should skip key groups that don&#39;t have a defined offset [FLINK-19750] - Deserializer is not opened in Kafka consumer when restoring from state [FLINK-19755] - Fix CEP documentation error of the example in &#39;After Match Strategy&#39; section [FLINK-19775] - SystemProcessingTimeServiceTest.testImmediateShutdown is instable [FLINK-19777] - Fix NullPointException for WindowOperator.close() [FLINK-19790] - Writing MAP&lt;STRING, STRING&gt; to Kafka with JSON format produces incorrect data. 
[FLINK-19806] - Job may try to leave SUSPENDED state in ExecutionGraph#failJob() [FLINK-19816] - Flink restored from a wrong checkpoint (a very old one and not the last completed one) [FLINK-19852] - Managed memory released check can block IterativeTask [FLINK-19867] - Validation fails for UDF that accepts var-args [FLINK-19894] - Use iloc for positional slicing instead of direct slicing in from_pandas [FLINK-19901] - Unable to exclude metrics variables for the last metrics reporter. [FLINK-19906] - Incorrect result when compare two binary fields [FLINK-19907] - Channel state (upstream) can be restored after emission of new elements (watermarks) [FLINK-19909] - Flink application in attach mode could not terminate when the only job is canceled [FLINK-19948] - Calling NOW() function throws compile exception [FLINK-20013] - BoundedBlockingSubpartition may leak network buffer if task is failed or canceled [FLINK-20018] - pipeline.cached-files option cannot escape &#39;:&#39; in path [FLINK-20033] - Job fails when stopping JobMaster [FLINK-20050] - SourceCoordinatorProviderTest.testCheckpointAndReset failed with NullPointerException [FLINK-20063] - File Source requests an additional split on every restore. [FLINK-20064] - Broken links in the documentation [FLINK-20065] - UnalignedCheckpointCompatibilityITCase.test failed with AskTimeoutException [FLINK-20068] - KafkaSubscriberTest.testTopicPatternSubscriber failed with unexpected results [FLINK-20069] - docs_404_check doesn&#39;t work properly [FLINK-20076] - DispatcherTest.testOnRemovedJobGraphDoesNotCleanUpHAFiles does not test the desired functionality [FLINK-20079] - Modified UnalignedCheckpointITCase...MassivelyParallel fails [FLINK-20081] - ExecutorNotifier should run handler in the main thread when receive an exception from the callable. 
[FLINK-20143] - use `yarn.provided.lib.dirs` config deploy job failed in yarn per job mode [FLINK-20165] - YARNSessionFIFOITCase.checkForProhibitedLogContents: Error occurred during initialization of boot layer java.lang.IllegalStateException: Module system already initialized [FLINK-20175] - Avro Confluent Registry SQL format does not support adding nullable columns [FLINK-20183] - Fix the default PYTHONPATH is overwritten in client side [FLINK-20193] - SourceCoordinator should catch exception thrown from SplitEnumerator.start() [FLINK-20194] - KafkaSourceFetcherManager.commitOffsets() should handle the case when there is no split fetcher. [FLINK-20200] - SQL Hints are not supported in &quot;Create View&quot; syntax [FLINK-20213] - Partition commit is delayed when records keep coming [FLINK-20221] - DelimitedInputFormat does not restore compressed filesplits correctly leading to dataloss [FLINK-20222] - The CheckpointCoordinator should reset the OperatorCoordinators when fail before the first checkpoint. [FLINK-20223] - The RecreateOnResetOperatorCoordinator and SourceCoordinator executor thread should use the user class loader. [FLINK-20243] - Remove useless words in documents [FLINK-20262] - Building flink-dist docker image does not work without python2 [FLINK-20266] - New Sources prevent JVM shutdown when running a job [FLINK-20270] - Fix the regression of missing ExternallyInducedSource support in FLIP-27 Source. [FLINK-20277] - flink-1.11.2 ContinuousFileMonitoringFunction cannot restore from failure [FLINK-20284] - Error happens in TaskExecutor when closing JobMaster connection if there was a python UDF [FLINK-20285] - LazyFromSourcesSchedulingStrategy is possible to schedule non-CREATED vertices [FLINK-20333] - Flink standalone cluster throws metaspace OOM after submitting multiple PyFlink UDF jobs. [FLINK-20351] - Execution.transitionState does not properly log slot location [FLINK-20382] - Exception thrown from JobMaster.startScheduling() may be ignored. 
[FLINK-20396] - Add &quot;OperatorCoordinator.resetSubtask()&quot; to fix order problems of &quot;subtaskFailed()&quot; [FLINK-20404] - ZooKeeper quorum fails to start due to missing log4j library [FLINK-20413] - Sources should add splits back in &quot;resetSubtask()&quot;, rather than in &quot;subtaskFailed()&quot;. [FLINK-20418] - NPE in IteratorSourceReader [FLINK-20442] - Fix license documentation mistakes in flink-python.jar [FLINK-20492] - The SourceOperatorStreamTask should implement cancelTask() and finishTask() [FLINK-20554] - The Checkpointed Data Size of the Latest Completed Checkpoint is incorrectly displayed on the Overview page of the UI New Feature [FLINK-19934] - [FLIP-27 source] add new API: SplitEnumeratorContext.runInCoordinatorThread(Runnable) Improvement [FLINK-16753] - Exception from AsyncCheckpointRunnable should be wrapped in CheckpointException [FLINK-18139] - Unaligned checkpoints checks wrong channels for inflight data. [FLINK-18500] - Make the legacy planner exception more clear when resolving computed columns types for schema [FLINK-18545] - Sql api cannot specify flink job name [FLINK-18715] - add cpu usage metric of jobmanager/taskmanager [FLINK-19193] - Recommend stop-with-savepoint in upgrade guidelines [FLINK-19225] - Improve code and logging in SourceReaderBase [FLINK-19245] - Set default queue capacity for FLIP-27 source handover queue to 2 [FLINK-19251] - Avoid confusing queue handling in &quot;SplitReader.handleSplitsChanges()&quot; [FLINK-19252] - Jaas file created under io.tmp.dirs - folder not created if not exists [FLINK-19265] - Simplify handling of &#39;NoMoreSplitsEvent&#39; [FLINK-19339] - Support Avro&#39;s unions with logical types [FLINK-19523] - Hide sensitive command-line configurations [FLINK-19569] - Upgrade ICU4J to 67.1+ [FLINK-19677] - TaskManager takes abnormally long time to register with JobManager on Kubernetes [FLINK-19698] - Add close() method and onCheckpointComplete() to the Source. 
[FLINK-19892] - Replace __metaclass__ field with metaclass keyword [FLINK-20049] - Simplify handling of &quot;request split&quot;. [FLINK-20055] - Datadog API Key exposed in Flink JobManager logs [FLINK-20142] - Update the document for CREATE TABLE LIKE that source table from different catalog is supported [FLINK-20152] - Document which execution.target values are supported [FLINK-20156] - JavaDocs of WatermarkStrategy.withTimestampAssigner are wrong wrt Java 8 [FLINK-20169] - Move emitting MAX_WATERMARK out of SourceOperator processing loop [FLINK-20207] - Improve the error message printed when submitting the pyflink jobs via &#39;flink run&#39; [FLINK-20296] - Explanation of keyBy was broken by find/replace of deprecated forms of keyBy Test [FLINK-18725] - &quot;Run Kubernetes test&quot; failed with &quot;30025: provided port is already allocated&quot; Task [FLINK-20455] - Add check to LicenseChecker for top level /LICENSE files in shaded jars "}),e.add({id:154,href:"/2020/12/10/apache-flink-1.12.0-release-announcement/",title:"Apache Flink 1.12.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink 1.12.0! Close to 300 contributors worked on over 1k threads to bring significant improvements to usability as well as new features that simplify (and unify) Flink handling across the API stack.
+Improvement [FLINK-20699] - Feedback invocation_id must not be constant. Task [FLINK-20161] - Consider switching from Travis CI to Github Actions for flink-statefun&#39;s CI workflows [FLINK-20189] - Restored feedback events may be silently dropped if per key-group header bytes were not fully read [FLINK-20636] - Require unaligned checkpoints to be disabled in StateFun applications [FLINK-20689] - Upgrade StateFun to Flink 1.11.3 `}),e.add({id:154,href:"/2020/12/18/apache-flink-1.11.3-released/",title:"Apache Flink 1.11.3 Released",section:"Flink Blog",content:"The Apache Flink community released the third bugfix version of the Apache Flink 1.11 series.\nThis release includes 151 fixes and minor improvements for Flink 1.11.2. The list below includes a detailed list of all fixes and improvements.\nWe highly recommend all users to upgrade to Flink 1.11.3.\nUpdated Maven dependencies:\n&lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.11.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.11.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.11.3&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.\nList of resolved issues:\nSub-task [FLINK-17393] - Improve the `FutureCompletingBlockingQueue` to wakeup blocking put() more elegantly. 
[FLINK-18604] - HBase ConnectorDescriptor can not work in Table API [FLINK-18673] - Calling ROW() in a UDF results in UnsupportedOperationException [FLINK-18680] - Improve RecordsWithSplitIds API [FLINK-18916] - Add a &quot;Operations&quot; link(linked to dev/table/tableApi.md) under the &quot;Python API&quot; -&gt; &quot;User Guide&quot; -&gt; &quot;Table API&quot; section [FLINK-18918] - Add a &quot;Connectors&quot; document under the &quot;Python API&quot; -&gt; &quot;User Guide&quot; -&gt; &quot;Table API&quot; section [FLINK-18922] - Add a &quot;Catalogs&quot; link (linked to dev/table/catalogs.md) under the &quot;Python API&quot; -&gt; &quot;User Guide&quot; -&gt; &quot;Table API&quot; section [FLINK-18926] - Add a &quot;Environment Variables&quot; document under the &quot;Python API&quot; -&gt; &quot;User Guide&quot; -&gt; &quot;Table API&quot; section [FLINK-19162] - Allow Split Reader based sources to reuse record batches [FLINK-19205] - SourceReaderContext should give access to Configuration and Hostbame [FLINK-20397] - Pass checkpointId to OperatorCoordinator.resetToCheckpoint(). 
Bug [FLINK-9992] - FsStorageLocationReferenceTest#testEncodeAndDecode failed in CI [FLINK-13733] - FlinkKafkaInternalProducerITCase.testHappyPath fails on Travis [FLINK-15170] - WebFrontendITCase.testCancelYarn fails on travis [FLINK-16246] - Exclude &quot;SdkMBeanRegistrySupport&quot; from dynamically loaded AWS connectors [FLINK-16268] - Failed to run rank over window with Hive built-in functions [FLINK-16768] - HadoopS3RecoverableWriterITCase.testRecoverWithStateWithMultiPart hangs [FLINK-17341] - freeSlot in TaskExecutor.closeJobManagerConnection cause ConcurrentModificationException [FLINK-17458] - TaskExecutorSubmissionTest#testFailingScheduleOrUpdateConsumers [FLINK-17677] - FLINK_LOG_PREFIX recommended in docs is not always available [FLINK-17825] - HA end-to-end gets killed due to timeout [FLINK-18128] - CoordinatedSourceITCase.testMultipleSources gets stuck [FLINK-18196] - flink throws `NullPointerException` when executeCheckpointing [FLINK-18222] - &quot;Avro Confluent Schema Registry nightly end-to-end test&quot; unstable with &quot;Kafka cluster did not start after 120 seconds&quot; [FLINK-18815] - AbstractCloseableRegistryTest.testClose unstable [FLINK-18818] - HadoopRenameCommitterHDFSTest.testCommitOneFile[Override: false] failed with &quot;java.io.IOException: The stream is closed&quot; [FLINK-18836] - Python UDTF doesn&#39;t work well when the return type isn&#39;t generator [FLINK-18915] - FIXED_PATH(dummy Hadoop Path) with WriterImpl may cause ORC writer OOM [FLINK-19022] - AkkaRpcActor failed to start but no exception information [FLINK-19121] - Avoid accessing HDFS frequently in HiveBulkWriterFactory [FLINK-19135] - (Stream)ExecutionEnvironment.execute() should not throw ExecutionException [FLINK-19138] - Python UDF supports directly specifying input_types as DataTypes.ROW [FLINK-19140] - Join with Table Function (UDTF) SQL example is incorrect [FLINK-19151] - Flink does not normalize container resource with correct configurations when Yarn 
FairScheduler is used [FLINK-19154] - Application mode deletes HA data in case of suspended ZooKeeper connection [FLINK-19170] - Parameter naming error [FLINK-19201] - PyFlink e2e tests is instable and failed with &quot;Connection broken: OSError&quot; [FLINK-19227] - The catalog is still created after opening failed in catalog registering [FLINK-19237] - LeaderChangeClusterComponentsTest.testReelectionOfJobMaster failed with &quot;NoResourceAvailableException: Could not allocate the required slot within slot request timeout&quot; [FLINK-19244] - CSV format can&#39;t deserialize null ROW field [FLINK-19250] - SplitFetcherManager does not propagate errors correctly [FLINK-19253] - SourceReaderTestBase.testAddSplitToExistingFetcher hangs [FLINK-19258] - Fix the wrong example of &quot;csv.line-delimiter&quot; in CSV documentation [FLINK-19280] - The option &quot;sink.buffer-flush.max-rows&quot; for JDBC can&#39;t be disabled by set to zero [FLINK-19281] - LIKE cannot recognize full table path [FLINK-19291] - Fix exception for AvroSchemaConverter#convertToSchema when RowType contains multiple row fields [FLINK-19295] - YARNSessionFIFOITCase.checkForProhibitedLogContents found a log with prohibited string [FLINK-19300] - Timer loss after restoring from savepoint [FLINK-19321] - CollectSinkFunction does not define serialVersionUID [FLINK-19338] - New source interface cannot unregister unregistered source [FLINK-19361] - Make HiveCatalog thread safe [FLINK-19398] - Hive connector fails with IllegalAccessError if submitted as usercode [FLINK-19401] - Job stuck in restart loop due to excessive checkpoint recoveries which block the JobMaster [FLINK-19423] - Fix ArrayIndexOutOfBoundsException when executing DELETE statement in JDBC upsert sink [FLINK-19433] - An Error example of FROM_UNIXTIME function in document [FLINK-19448] - CoordinatedSourceITCase.testEnumeratorReaderCommunication hangs [FLINK-19535] - SourceCoordinator should avoid fail job multiple times. 
[FLINK-19557] - Issue retrieving leader after zookeeper session reconnect [FLINK-19585] - UnalignedCheckpointCompatibilityITCase.test:97-&gt;runAndTakeSavepoint: &quot;Not all required tasks are currently running.&quot; [FLINK-19587] - Error result when casting binary type as varchar [FLINK-19618] - Broken link in docs [FLINK-19629] - Fix NullPointException when deserializing map field with null value for Avro format [FLINK-19675] - The plan of is incorrect when Calc contains WHERE clause, composite fields access and Python UDF at the same time [FLINK-19695] - Writing Table with RowTime Column of type TIMESTAMP(3) to Kafka fails with ClassCastException [FLINK-19717] - SourceReaderBase.pollNext may return END_OF_INPUT if SplitReader.fetch throws [FLINK-19740] - Error in to_pandas for table containing event time: class java.time.LocalDateTime cannot be cast to class java.sql.Timestamp [FLINK-19741] - InternalTimeServiceManager fails to restore due to corrupt reads if there are other users of raw keyed state streams [FLINK-19748] - KeyGroupRangeOffsets#KeyGroupOffsetsIterator should skip key groups that don&#39;t have a defined offset [FLINK-19750] - Deserializer is not opened in Kafka consumer when restoring from state [FLINK-19755] - Fix CEP documentation error of the example in &#39;After Match Strategy&#39; section [FLINK-19775] - SystemProcessingTimeServiceTest.testImmediateShutdown is instable [FLINK-19777] - Fix NullPointException for WindowOperator.close() [FLINK-19790] - Writing MAP&lt;STRING, STRING&gt; to Kafka with JSON format produces incorrect data. 
[FLINK-19806] - Job may try to leave SUSPENDED state in ExecutionGraph#failJob() [FLINK-19816] - Flink restored from a wrong checkpoint (a very old one and not the last completed one) [FLINK-19852] - Managed memory released check can block IterativeTask [FLINK-19867] - Validation fails for UDF that accepts var-args [FLINK-19894] - Use iloc for positional slicing instead of direct slicing in from_pandas [FLINK-19901] - Unable to exclude metrics variables for the last metrics reporter. [FLINK-19906] - Incorrect result when compare two binary fields [FLINK-19907] - Channel state (upstream) can be restored after emission of new elements (watermarks) [FLINK-19909] - Flink application in attach mode could not terminate when the only job is canceled [FLINK-19948] - Calling NOW() function throws compile exception [FLINK-20013] - BoundedBlockingSubpartition may leak network buffer if task is failed or canceled [FLINK-20018] - pipeline.cached-files option cannot escape &#39;:&#39; in path [FLINK-20033] - Job fails when stopping JobMaster [FLINK-20050] - SourceCoordinatorProviderTest.testCheckpointAndReset failed with NullPointerException [FLINK-20063] - File Source requests an additional split on every restore. [FLINK-20064] - Broken links in the documentation [FLINK-20065] - UnalignedCheckpointCompatibilityITCase.test failed with AskTimeoutException [FLINK-20068] - KafkaSubscriberTest.testTopicPatternSubscriber failed with unexpected results [FLINK-20069] - docs_404_check doesn&#39;t work properly [FLINK-20076] - DispatcherTest.testOnRemovedJobGraphDoesNotCleanUpHAFiles does not test the desired functionality [FLINK-20079] - Modified UnalignedCheckpointITCase...MassivelyParallel fails [FLINK-20081] - ExecutorNotifier should run handler in the main thread when receive an exception from the callable. 
[FLINK-20143] - use `yarn.provided.lib.dirs` config deploy job failed in yarn per job mode [FLINK-20165] - YARNSessionFIFOITCase.checkForProhibitedLogContents: Error occurred during initialization of boot layer java.lang.IllegalStateException: Module system already initialized [FLINK-20175] - Avro Confluent Registry SQL format does not support adding nullable columns [FLINK-20183] - Fix the default PYTHONPATH is overwritten in client side [FLINK-20193] - SourceCoordinator should catch exception thrown from SplitEnumerator.start() [FLINK-20194] - KafkaSourceFetcherManager.commitOffsets() should handle the case when there is no split fetcher. [FLINK-20200] - SQL Hints are not supported in &quot;Create View&quot; syntax [FLINK-20213] - Partition commit is delayed when records keep coming [FLINK-20221] - DelimitedInputFormat does not restore compressed filesplits correctly leading to dataloss [FLINK-20222] - The CheckpointCoordinator should reset the OperatorCoordinators when fail before the first checkpoint. [FLINK-20223] - The RecreateOnResetOperatorCoordinator and SourceCoordinator executor thread should use the user class loader. [FLINK-20243] - Remove useless words in documents [FLINK-20262] - Building flink-dist docker image does not work without python2 [FLINK-20266] - New Sources prevent JVM shutdown when running a job [FLINK-20270] - Fix the regression of missing ExternallyInducedSource support in FLIP-27 Source. [FLINK-20277] - flink-1.11.2 ContinuousFileMonitoringFunction cannot restore from failure [FLINK-20284] - Error happens in TaskExecutor when closing JobMaster connection if there was a python UDF [FLINK-20285] - LazyFromSourcesSchedulingStrategy is possible to schedule non-CREATED vertices [FLINK-20333] - Flink standalone cluster throws metaspace OOM after submitting multiple PyFlink UDF jobs. [FLINK-20351] - Execution.transitionState does not properly log slot location [FLINK-20382] - Exception thrown from JobMaster.startScheduling() may be ignored. 
[FLINK-20396] - Add &quot;OperatorCoordinator.resetSubtask()&quot; to fix order problems of &quot;subtaskFailed()&quot; [FLINK-20404] - ZooKeeper quorum fails to start due to missing log4j library [FLINK-20413] - Sources should add splits back in &quot;resetSubtask()&quot;, rather than in &quot;subtaskFailed()&quot;. [FLINK-20418] - NPE in IteratorSourceReader [FLINK-20442] - Fix license documentation mistakes in flink-python.jar [FLINK-20492] - The SourceOperatorStreamTask should implement cancelTask() and finishTask() [FLINK-20554] - The Checkpointed Data Size of the Latest Completed Checkpoint is incorrectly displayed on the Overview page of the UI New Feature [FLINK-19934] - [FLIP-27 source] add new API: SplitEnumeratorContext.runInCoordinatorThread(Runnable) Improvement [FLINK-16753] - Exception from AsyncCheckpointRunnable should be wrapped in CheckpointException [FLINK-18139] - Unaligned checkpoints checks wrong channels for inflight data. [FLINK-18500] - Make the legacy planner exception more clear when resolving computed columns types for schema [FLINK-18545] - Sql api cannot specify flink job name [FLINK-18715] - add cpu usage metric of jobmanager/taskmanager [FLINK-19193] - Recommend stop-with-savepoint in upgrade guidelines [FLINK-19225] - Improve code and logging in SourceReaderBase [FLINK-19245] - Set default queue capacity for FLIP-27 source handover queue to 2 [FLINK-19251] - Avoid confusing queue handling in &quot;SplitReader.handleSplitsChanges()&quot; [FLINK-19252] - Jaas file created under io.tmp.dirs - folder not created if not exists [FLINK-19265] - Simplify handling of &#39;NoMoreSplitsEvent&#39; [FLINK-19339] - Support Avro&#39;s unions with logical types [FLINK-19523] - Hide sensitive command-line configurations [FLINK-19569] - Upgrade ICU4J to 67.1+ [FLINK-19677] - TaskManager takes abnormally long time to register with JobManager on Kubernetes [FLINK-19698] - Add close() method and onCheckpointComplete() to the Source. 
[FLINK-19892] - Replace __metaclass__ field with metaclass keyword [FLINK-20049] - Simplify handling of &quot;request split&quot;. [FLINK-20055] - Datadog API Key exposed in Flink JobManager logs [FLINK-20142] - Update the document for CREATE TABLE LIKE that source table from different catalog is supported [FLINK-20152] - Document which execution.target values are supported [FLINK-20156] - JavaDocs of WatermarkStrategy.withTimestampAssigner are wrong wrt Java 8 [FLINK-20169] - Move emitting MAX_WATERMARK out of SourceOperator processing loop [FLINK-20207] - Improve the error message printed when submitting the pyflink jobs via &#39;flink run&#39; [FLINK-20296] - Explanation of keyBy was broken by find/replace of deprecated forms of keyBy Test [FLINK-18725] - &quot;Run Kubernetes test&quot; failed with &quot;30025: provided port is already allocated&quot; Task [FLINK-20455] - Add check to LicenseChecker for top level /LICENSE files in shaded jars "}),e.add({id:155,href:"/2020/12/10/apache-flink-1.12.0-release-announcement/",title:"Apache Flink 1.12.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to announce the release of Flink 1.12.0! Close to 300 contributors worked on over 1k threads to bring significant improvements to usability as well as new features that simplify (and unify) Flink handling across the API stack.
 Release Highlights
 The community has added support for efficient batch execution in the DataStream API. This is the next major milestone towards achieving a truly unified runtime for both batch and stream processing.
 Kubernetes-based High Availability (HA) was implemented as an alternative to ZooKeeper for highly available production setups.
@@ -2868,7 +2878,7 @@
 Release Notes # Please review the release notes carefully for a detailed list of changes and new features if you plan to upgrade your setup to Flink 1.12. This version is API-compatible with previous 1.x releases for APIs annotated with the @Public annotation.
 List of Contributors # The Apache Flink community would like to thank each and every one of the 300 contributors that have made this release possible:
 Abhijit Shandilya, Aditya Agarwal, Alan Su, Alexander Alexandrov, Alexander Fedulov, Alexey Trenikhin, Aljoscha Krettek, Allen Madsen, Andrei Bulgakov, Andrey Zagrebin, Arvid Heise, Authuir, Bairos, Bartosz Krasinski, Benchao Li, Brandon, Brian Zhou, C08061, Canbin Zheng, Cedric Chen, Chesnay Schepler, Chris Nix, Congxian Qiu, DG-Wangtao, Da(Dash)Shen, Dan Hill, Daniel Magyar, Danish Amjad, Danny Chan, Danny Cranmer, David Anderson, Dawid Wysakowicz, Devin Thomson, Dian Fu, Dongxu Wang, Dylan Forciea, Echo Lee, Etienne Chauchot, Fabian Paul, Felipe Lolas, Fin-Chan, Fin-chan, Flavio Pompermaier, Flora Tao, Fokko Driesprong, Gao Yun, Gary Yao, Ghildiyal, GitHub, Grebennikov Roman, GuoWei Ma, Gyula Fora, Hequn Cheng, Herman, Hong Teoh, HuangXiao, HuangXingBo, Husky Zeng, Hyeonseop Lee, I. Raleigh, Ivan, Jacky Lau, Jark Wu, Jaskaran Bindra, Jeff Yang, Jeff Zhang, Jiangjie (Becket) Qin, Jiatao Tao, Jiayi Liao, Jiayi-Liao, Jiezhi.G, Jimmy.Zhou, Jindrich Vimr, Jingsong Lee, JingsongLi, Joey Echeverria, Juha Mynttinen, Jun Qin, Jörn Kottmann, Karim Mansour, Kevin Bohinski, Kezhu Wang, Konstantin Knauf, Kostas Kloudas, Kurt Young, Lee Do-Kyeong, Leonard Xu, Lijie Wang, Liu Jiangang, Lorenzo Nicora, LululuAlu, Luxios22, Marta Paes Moreira, Mateusz Sabat, Matthias Pohl, Maximilian Michels, Miklos Gergely, Milan Nikl, Nico Kruber, Niel Hu, Niels Basjes, Oleksandr Nitavskyi, Paul Lam, Peng, PengFei Li, PengchengLiu, Peter Huang, Piotr Nowojski, PoojaChandak, Qingsheng Ren, Qishang Zhong, Richard Deurwaarder, Richard Moorhead, Robert Metzger, Roc Marshal, Roey Shem Tov, Roman, Roman Khachatryan, Rong Rong, Rui Li, Seth Wiesman, Shawn Huang, ShawnHx, Shengkai, Shuiqiang Chen, Shuo Cheng, SteNicholas, Stephan Ewen, Steve Whelan, Steven Wu, Tartarus0zm, Terry Wang, Thesharing, Thomas Weise, Till Rohrmann, Timo Walther, TsReaper, Tzu-Li (Gordon) Tai, Ufuk Celebi, V1ncentzzZ, Vladimirs Kotovs, Wei Zhong, Weike DONG, XBaith, Xiaogang Zhou, Xiaoguang Sun, Xingcan Cui, Xintong Song, 
Xuannan, Yang Liu, Yangze Guo, Yichao Yang, Yikun Jiang, Yu Li, Yuan Mei, Yubin Li, Yun Gao, Yun Tang, Yun Wang, Zhenhua Yang, Zhijiang, Zhu Zhu, acesine, acqua.csq, austin ce, bigdata-ny, billyrrr, caozhen, caozhen1937, chaojianok, chenkai, chris, cpugputpu, dalong01.liu, darionyaphet, dijie, diohabara, dufeng1010, fangliang, felixzheng, gkrishna, gm7y8, godfrey he, godfreyhe, gsralex, haseeb1431, hequn.chq, hequn8128, houmaozheng, huangxiao, huangxingbo, huzekang, jPrest, jasonlee, jinfeng, jinhai, johnm, jxeditor, kecheng, kevin.cyj, kevinzwx, klion26, leiqiang, libenchao, lijiewang.wlj, liufangliang, liujiangang, liuyongvs, liuyufei9527, lsy, lzy3261944, mans2singh, molsionmo, openopen2, pengweibo, rinkako, sanshi@wwdz.onaliyun.com, secondChoice, seunjjs, shaokan.cao, shizhengchao, shizk233, shouweikun, spurthi chaganti, sujun, sunjincheng121, sxnan, tison, totorooo, venn, vthinkxie, wangsong2, wangtong, wangxiyuan, wangxlong, wangyang0918, wangzzu, weizheng92, whlwanghailong, wineandcheeze, wooplevip, wtog, wudi28, wxp, xcomp, xiaoHoly, xiaolong.wang, yangyichao-mango, yingshin, yushengnan, yushujun, yuzhao.cyz, zhangap, zhangmang, zhangzhanchum, zhangzhanchun, zhangzhanhua, zhangzp, zheyu, zhijiang, zhushang, zhuxiaoshang, zlzhang0122, zodo, zoudan, zouzhiye
-`}),e.add({id:155,href:"/2020/12/02/improvements-in-task-scheduling-for-batch-workloads-in-apache-flink-1.12/",title:"Improvements in task scheduling for batch workloads in Apache Flink 1.12",section:"Flink Blog",content:`The Flink community has been working for some time on making Flink a truly unified batch and stream processing system. Achieving this involves touching a lot of different components of the Flink stack, from the user-facing APIs all the way to low-level operator processes such as task scheduling. In this blogpost, we’ll take a closer look at how far the community has come in improving scheduling for batch workloads, why this matters and what you can expect in the Flink 1.12 release with the new pipelined region scheduler.
+`}),e.add({id:156,href:"/2020/12/02/improvements-in-task-scheduling-for-batch-workloads-in-apache-flink-1.12/",title:"Improvements in task scheduling for batch workloads in Apache Flink 1.12",section:"Flink Blog",content:`The Flink community has been working for some time on making Flink a truly unified batch and stream processing system. Achieving this involves touching a lot of different components of the Flink stack, from the user-facing APIs all the way to low-level operator processes such as task scheduling. In this blogpost, we’ll take a closer look at how far the community has come in improving scheduling for batch workloads, why this matters and what you can expect in the Flink 1.12 release with the new pipelined region scheduler.
 Towards unified scheduling # Flink has an internal scheduler to distribute work to all available cluster nodes, taking resource utilization, state locality and recovery into account. How do you write a scheduler for a unified batch and streaming system? To answer this question, let&rsquo;s first have a look into the high-level differences between batch and streaming scheduling requirements.
 Streaming # Streaming jobs usually require that all operator subtasks are running in parallel at the same time, for an indefinite time. Therefore, all the required resources to run these jobs have to be provided upfront, and all operator subtasks must be deployed at once.
 Flink: Streaming job example Because there are no finite intermediate results, a streaming job always has to be restarted fully from a checkpoint or a savepoint in case of failure.
@@ -2908,7 +2918,7 @@
 Slots and resources # A TaskManager instance has a certain number of virtual slots. Each slot represents a certain part of the TaskManager’s physical resources to run the operator subtasks, and each subtask is deployed into a slot of the TaskManager. A slot can run multiple subtasks from different operators at the same time, usually chained together.
 Scheduling strategy # Scheduling in Flink is a process of searching for and allocating appropriate resources (slots) from the TaskManagers to run the subtasks and produce results. The scheduling strategy reacts on scheduling events (like start job, subtask failed or finished etc) to decide which subtask to deploy next.
 For instance, it does not make sense to schedule subtasks whose inputs are not ready to consume yet to avoid wasting resources. Another example is to schedule subtasks which are connected with pipelined edges together, to avoid deadlocks caused by backpressure.
-`}),e.add({id:156,href:"/2020/11/11/stateful-functions-2.2.1-release-announcement/",title:"Stateful Functions 2.2.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink community released the first bugfix release of the Stateful Functions (StateFun) 2.2 series, version 2.2.1.
+`}),e.add({id:157,href:"/2020/11/11/stateful-functions-2.2.1-release-announcement/",title:"Stateful Functions 2.2.1 Release Announcement",section:"Flink Blog",content:`The Apache Flink community released the first bugfix release of the Stateful Functions (StateFun) 2.2 series, version 2.2.1.
 This release fixes a critical bug that causes restoring the Stateful Functions cluster from snapshots (checkpoints or savepoints) to fail under certain conditions. Starting from this release, StateFun now creates snapshots with a more robust format that allows it to be restored safely going forward.
 We strongly recommend all users to upgrade to 2.2.1. Please see the following sections on instructions and things to keep in mind for this upgrade.
 For new users just starting out with Stateful Functions # We strongly recommend to skip all previous versions and start using StateFun from version 2.2.1. This guarantees that failure recovery from checkpoints, or application upgrades using savepoints will work as expected for you.
@@ -2916,7 +2926,7 @@
 The follow-up hotfix release 2.2.2 is expected to be ready within another 2~3 weeks, as it requires a new hotfix release from Flink core, and ultimately an upgrade of the Flink dependency in StateFun. We’ll update the community via the Flink mailing lists as soon as this is ready, so please subscribe to the mailing lists for important updates for this!
 You can find the binaries on the updated Downloads page.
 This release includes 6 fixes and minor improvements since StateFun 2.2.0. Below is a detailed list of all fixes and improvements:
-Bug [FLINK-19515] - Async RequestReply handler concurrency bug [FLINK-19692] - Can&#39;t restore feedback channel from savepoint [FLINK-19866] - FunctionsStateBootstrapOperator.createStateAccessor fails due to uninitialized runtimeContext Improvement [FLINK-19826] - StateFun Dockerfile copies plugins with a specific version instead of a wildcard [FLINK-19827] - Allow the harness to start with a user provided Flink configuration [FLINK-19840] - Add a rocksdb and heap timers configuration validation `}),e.add({id:157,href:"/2020/10/15/from-aligned-to-unaligned-checkpoints-part-1-checkpoints-alignment-and-backpressure/",title:"From Aligned to Unaligned Checkpoints - Part 1: Checkpoints, Alignment, and Backpressure",section:"Flink Blog",content:`Apache Flink’s checkpoint-based fault tolerance mechanism is one of its defining features. Because of that design, Flink unifies batch and stream processing, can easily scale to both very small and extremely large scenarios and provides support for many operational features like stateful upgrades with state evolution or roll-backs and time-travel.
+Bug [FLINK-19515] - Async RequestReply handler concurrency bug [FLINK-19692] - Can&#39;t restore feedback channel from savepoint [FLINK-19866] - FunctionsStateBootstrapOperator.createStateAccessor fails due to uninitialized runtimeContext Improvement [FLINK-19826] - StateFun Dockerfile copies plugins with a specific version instead of a wildcard [FLINK-19827] - Allow the harness to start with a user provided Flink configuration [FLINK-19840] - Add a rocksdb and heap timers configuration validation `}),e.add({id:158,href:"/2020/10/15/from-aligned-to-unaligned-checkpoints-part-1-checkpoints-alignment-and-backpressure/",title:"From Aligned to Unaligned Checkpoints - Part 1: Checkpoints, Alignment, and Backpressure",section:"Flink Blog",content:`Apache Flink’s checkpoint-based fault tolerance mechanism is one of its defining features. Because of that design, Flink unifies batch and stream processing, can easily scale to both very small and extremely large scenarios and provides support for many operational features like stateful upgrades with state evolution or roll-backs and time-travel.
 Despite all these great properties, Flink&rsquo;s checkpointing method has an Achilles Heel: the speed of a completed checkpoint is determined by the speed at which data flows through the application. When the application backpressures, the processing of checkpoints is backpressured as well (Appendix 1 recaps what is backpressure and why it can be a good thing). In such cases, checkpoints may take longer to complete or even time out completely.
 In Flink 1.11, the community introduced a first version of a new feature called &ldquo;unaligned checkpoints&rdquo; that aims at solving this issue, while Flink 1.12 plans to further expand its functionality. In this two-series blog post, we discuss how Flink’s checkpointing mechanism has been modified to support unaligned checkpoints, how unaligned checkpoints work, and how this new mode impacts Flink users. In the first of the two posts, we start with a recap of the original checkpointing process in Flink, its core properties and issues under backpressure.
 State in Streaming Applications # Simply put, state is the information that you need to remember across events. Even the most trivial streaming applications are typically stateful because of their need to “remember” the exact position they are processing data from, for example in the form of a Kafka Partition Offset or a File Offset. In addition, many applications hold state internally as a way to support their internal operations, such as windows, aggregations, joins, or state machines.
@@ -2945,7 +2955,7 @@
 We have a source (let’s say reading data from Apache Kafka), parsing data, grouping and aggregating data by a key, and writing it to a sink system (some database). The application needs to re-group data by key between the parsing and the grouping/aggregation step. Let’s assume we use a non-backpressure approach, like writing the data to a log/MQ for the data re-grouping over the network (the approach used by Kafka Streams). If the sink is now slower than the remaining parts of the streaming application (which can easily happen), the first stage (source and parse) will still work as fast as possible to pull data out of the source, parse it, and put it into the log for the shuffle. That intermediate log will accumulate a lot of data, meaning it needs significant capacity, so that in a worst case scenario can hold a full copy of the input data or otherwise result in data loss (when the drift is greater than the retention time).
 With backpressure, the source/parse stage slows down to match the speed of the sink, keeping both parts of the application closer together in their progress through the data, and avoiding the need to provision a lot of intermediate storage capacity.
 We&rsquo;d like to thank Marta Paes Moreira and Markos Sfikas for the wonderful review process.
-`}),e.add({id:158,href:"/2020/10/13/stateful-functions-internals-behind-the-scenes-of-stateful-serverless/",title:"Stateful Functions Internals: Behind the scenes of Stateful Serverless",section:"Flink Blog",content:`Stateful Functions (StateFun) simplifies the building of distributed stateful applications by combining the best of two worlds: the strong messaging and state consistency guarantees of stateful stream processing, and the elasticity and serverless experience of today&rsquo;s cloud-native architectures and popular event-driven FaaS platforms. Typical StateFun applications consist of functions deployed behind simple services using these modern platforms, with a separate StateFun cluster playing the role of an “event-driven database” that provides consistency and fault-tolerance for the functions&rsquo; state and messaging.
+`}),e.add({id:159,href:"/2020/10/13/stateful-functions-internals-behind-the-scenes-of-stateful-serverless/",title:"Stateful Functions Internals: Behind the scenes of Stateful Serverless",section:"Flink Blog",content:`Stateful Functions (StateFun) simplifies the building of distributed stateful applications by combining the best of two worlds: the strong messaging and state consistency guarantees of stateful stream processing, and the elasticity and serverless experience of today&rsquo;s cloud-native architectures and popular event-driven FaaS platforms. Typical StateFun applications consist of functions deployed behind simple services using these modern platforms, with a separate StateFun cluster playing the role of an “event-driven database” that provides consistency and fault-tolerance for the functions&rsquo; state and messaging.
 But how exactly does StateFun achieve that? How does the StateFun cluster communicate with the functions?
 This blog dives deep into the internals of the StateFun runtime. The entire walkthrough is complemented by a demo application which can be completely deployed on AWS services. Most significantly, in the demo, the stateful functions are deployed and serviced using AWS Lambda, a popular FaaS platform among many others. The goal here is to allow readers to have a good grasp of the interaction between the StateFun runtime and the functions, how that works cohesively to provide a Stateful Serverless experience, and how they can apply what they&rsquo;ve learnt to deploy their StateFun applications on other public cloud offerings such as GCP or Microsoft Azure.
 Introducing the example: Shopping Cart # Note You can find the full code [here](https://github.com/tzulitai/statefun-aws-demo/blob/master/app/shopping_cart.py), which uses StateFun's [Python SDK](//nightlies.apache.org/flink/flink-statefun-docs-master/sdk/python.html). Alternatively, if you are unfamiliar with StateFun's API, you can check out this [earlier blog](https://flink.apache.org/2020/08/19/statefun.html) on modeling applications and stateful entities using [StateFun's programming constructs](//nightlies.apache.org/flink/flink-statefun-docs-master/concepts/application-building-blocks.html). Let’s first take a look at a high-level overview of the motivating demo for this blog post: a shopping cart application. The diagram below covers the functions that build up the application, the state that the functions would keep, and the messages that flow between them. We’ll be referencing this example throughout the blog post.
@@ -2974,7 +2984,7 @@
 In our complementary demo code, you can find here the exact code on how to expose and service StateFun functions through AWS Lambda. Likewise, this is possible for any other FaaS platform that supports triggering the functions using HTTP endpoints (and other transports as well in the future).
 Fig. 5 on the right illustrates what a complete AWS deployment of a StateFun application would look like, with functions serviced via AWS Lambda, AWS Kinesis streams as ingresses and egresses, AWS EKS managed Kubernetes cluster to run the StateFun cluster, and an AWS S3 bucket to store the periodic checkpoints. You can also follow the instructions in the demo code to try it out and deploy this yourself right away!
 If you’d like to learn more about Stateful Functions, head over to the official documentation, where you can also find more hands-on tutorials to try out yourself!
-`}),e.add({id:159,href:"/2020/09/28/stateful-functions-2.2.0-release-announcement/",title:"Stateful Functions 2.2.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is happy to announce the release of Stateful Functions (StateFun) 2.2.0! This release introduces major features that extend the SDKs, such as support for asynchronous functions in the Python SDK, new persisted state constructs, and a new SDK that allows embedding StateFun functions within a Flink DataStream job. Moreover, we&rsquo;ve also included important changes that improve out-of-the-box stability for common workloads, as well as increased observability for operational purposes.
+`}),e.add({id:160,href:"/2020/09/28/stateful-functions-2.2.0-release-announcement/",title:"Stateful Functions 2.2.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is happy to announce the release of Stateful Functions (StateFun) 2.2.0! This release introduces major features that extend the SDKs, such as support for asynchronous functions in the Python SDK, new persisted state constructs, and a new SDK that allows embedding StateFun functions within a Flink DataStream job. Moreover, we&rsquo;ve also included important changes that improve out-of-the-box stability for common workloads, as well as increased observability for operational purposes.
 We&rsquo;ve also seen new 3rd party SDKs for StateFun being developed since the last release. While they are not part of the release artifacts, it&rsquo;s great seeing these community-driven additions! We&rsquo;ve highlighted these efforts below in this announcement.
 The binary distribution and source artifacts are now available on the updated Downloads page of the Flink website, and the most recent Python SDK distribution is available on PyPI. For more details, check the complete release changelog and the updated documentation. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA!
 New Features # Asynchronous functions in Python SDK # This release enables registering asynchronous Python functions as stateful functions by introducing a new handler in the Python SDK: AsyncRequestReplyHandler. This allows serving StateFun functions with Python web frameworks that support asynchronous IO natively (for example, aiohttp):
@@ -2994,13 +3004,13 @@
 List of Contributors # The Apache Flink community would like to thank all contributors that have made this release possible:
 abc863377, Authuir, Chesnay Schepler, Congxian Qiu, David Anderson, Dian Fu, Francesco Guardiani, Igal Shilman, Marta Paes Moreira, Patrick Wiener, Rafi Aroch, Seth Wiesman, Stephan Ewen, Tzu-Li (Gordon) Tai, Ufuk Celebi
 If you’d like to get involved, we’re always looking for new contributors.
-`}),e.add({id:160,href:"/2020/09/17/apache-flink-1.11.2-released/",title:"Apache Flink 1.11.2 Released",section:"Flink Blog",content:`The Apache Flink community released the second bugfix version of the Apache Flink 1.11 series.
+`}),e.add({id:161,href:"/2020/09/17/apache-flink-1.11.2-released/",title:"Apache Flink 1.11.2 Released",section:"Flink Blog",content:`The Apache Flink community released the second bugfix version of the Apache Flink 1.11 series.
 This release includes 96 fixes and minor improvements for Flink 1.11.1. The list below includes a detailed list of all fixes and improvements.
 We highly recommend all users to upgrade to Flink 1.11.2.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.11.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.11.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.11.2&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-16087] - Translate &quot;Detecting Patterns&quot; page of &quot;Streaming Concepts&quot; into Chinese [FLINK-18264] - Translate the &quot;External Resource Framework&quot; page into Chinese [FLINK-18628] - Invalid error message for overloaded methods with same parameter name [FLINK-18801] - Add a &quot;10 minutes to Table API&quot; document under the &quot;Python API&quot; -&gt; &quot;User Guide&quot; -&gt; &quot;Table API&quot; section [FLINK-18910] - Create the new document structure for Python documentation according to FLIP-133 [FLINK-18912] - Add a Table API tutorial link(linked to try-flink/python_table_api.md) under the &quot;Python API&quot; -&gt; &quot;GettingStart&quot; -&gt; &quot;Tutorial&quot; section [FLINK-18913] - Add a &quot;TableEnvironment&quot; document under the &quot;Python API&quot; -&gt; &quot;User Guide&quot; -&gt; &quot;Table API&quot; section [FLINK-18917] - Add a &quot;Built-in Functions&quot; link (linked to dev/table/functions/systemFunctions.md) under the &quot;Python API&quot; -&gt; &quot;User Guide&quot; -&gt; &quot;Table API&quot; section [FLINK-19110] - Flatten current PyFlink documentation structure Bug [FLINK-14087] - throws java.lang.ArrayIndexOutOfBoundsException when emiting the data using RebalancePartitioner. 
[FLINK-15467] - Should wait for the end of the source thread during the Task cancellation [FLINK-16510] - Task manager safeguard shutdown may not be reliable [FLINK-16827] - StreamExecTemporalSort should require a distribution trait in StreamExecTemporalSortRule [FLINK-18081] - Fix broken links in &quot;Kerberos Authentication Setup and Configuration&quot; doc [FLINK-18212] - Init lookup join failed when use udf on lookup table [FLINK-18341] - Building Flink Walkthrough Table Java 0.1 COMPILATION ERROR [FLINK-18421] - Elasticsearch (v6.3.1) sink end-to-end test instable [FLINK-18468] - TaskExecutorITCase.testJobReExecutionAfterTaskExecutorTermination fails with DuplicateJobSubmissionException [FLINK-18552] - Update migration tests in master to cover migration from release-1.11 [FLINK-18581] - Cannot find GC cleaner with java version previous jdk8u72(-b01) [FLINK-18588] - hive ddl create table should support &#39;if not exists&#39; [FLINK-18595] - Deadlock during job shutdown [FLINK-18600] - Kerberized YARN per-job on Docker test failed to download JDK 8u251 [FLINK-18608] - CustomizedConvertRule#convertCast drops nullability [FLINK-18612] - WordCount example failure when setting relative output path [FLINK-18632] - RowData&#39;s row kind do not assigned from input row data when sink code generate and physical type info is pojo type [FLINK-18639] - Error messages from BashJavaUtils are eaten [FLINK-18641] - &quot;Failure to finalize checkpoint&quot; error in MasterTriggerRestoreHook [FLINK-18646] - Managed memory released check can block RPC thread [FLINK-18650] - The description of dispatcher in Flink Architecture document is not accurate [FLINK-18655] - Set failOnUnableToExtractRepoInfo to false for git-commit-id-plugin in module flink-runtime [FLINK-18656] - Start Delay metric is always zero for unaligned checkpoints [FLINK-18659] - FileNotFoundException when writing Hive orc tables [FLINK-18663] - RestServerEndpoint may prevent server shutdown [FLINK-18665] - 
Filesystem connector should use TableSchema exclude computed columns [FLINK-18672] - Fix Scala code examples for UDF type inference annotations [FLINK-18677] - ZooKeeperLeaderRetrievalService does not invalidate leader in case of SUSPENDED connection [FLINK-18682] - Vector orc reader cannot read Hive 2.0.0 table [FLINK-18697] - Adding flink-table-api-java-bridge_2.11 to a Flink job kills the IDE logging [FLINK-18700] - Debezium-json format throws Exception when PG table&#39;s IDENTITY config is not FULL [FLINK-18705] - Debezium-JSON throws NPE when tombstone message is received [FLINK-18708] - The links of the connector sql jar of Kafka 0.10 and 0.11 are extinct [FLINK-18710] - ResourceProfileInfo is not serializable [FLINK-18748] - Savepoint would be queued unexpected if pendingCheckpoints less than maxConcurrentCheckpoints [FLINK-18749] - Correct dependencies in Kubernetes pom [FLINK-18750] - SqlValidatorException thrown when select from a view which contains a UDTF call [FLINK-18769] - MiniBatch doesn&#39;t work with FLIP-95 source [FLINK-18821] - Netty client retry mechanism may cause PartitionRequestClientFactory#createPartitionRequestClient to wait infinitely [FLINK-18832] - BoundedBlockingSubpartition does not work with StreamTask [FLINK-18856] - CheckpointCoordinator ignores checkpointing.min-pause [FLINK-18859] - ExecutionGraphNotEnoughResourceTest.testRestartWithSlotSharingAndNotEnoughResources failed with &quot;Condition was not met in given timeout.&quot; [FLINK-18862] - Fix LISTAGG throws BinaryRawValueData cannot be cast to StringData exception in runtime [FLINK-18867] - Generic table stored in Hive catalog is incompatible between 1.10 and 1.11 [FLINK-18900] - HiveCatalog should error out when listing partitions with an invalid spec [FLINK-18902] - Cannot serve results of asynchronous REST operations in per-job mode [FLINK-18941] - There are some typos in &quot;Set up JobManager Memory&quot; [FLINK-18942] - HiveTableSink shouldn&#39;t try to create 
BulkWriter factory when using MR writer [FLINK-18956] - StreamTask.invoke should catch Throwable instead of Exception [FLINK-18959] - Fail to archiveExecutionGraph because job is not finished when dispatcher close [FLINK-18992] - Table API renameColumns method annotation error [FLINK-18993] - Invoke sanityCheckTotalFlinkMemory method incorrectly in JobManagerFlinkMemoryUtils.java [FLINK-18994] - There is one typo in &quot;Set up TaskManager Memory&quot; [FLINK-19040] - SourceOperator is not closing SourceReader [FLINK-19061] - HiveCatalog fails to get partition column stats if partition value contains special characters [FLINK-19094] - Revise the description of watermark strategy in Flink Table document [FLINK-19108] - Stop expanding the identifiers with scope aliased by the system with &#39;EXPR$&#39; prefix [FLINK-19109] - Split Reader eats chained periodic watermarks [FLINK-19121] - Avoid accessing HDFS frequently in HiveBulkWriterFactory [FLINK-19133] - User provided kafka partitioners are not initialized correctly [FLINK-19148] - Table crashed in Flink Table API &amp; SQL Docs [FLINK-19166] - StreamingFileWriter should register Listener before the initialization of buckets Improvement [FLINK-16619] - Misleading SlotManagerImpl logging for slot reports of unknown task manager [FLINK-17075] - Add task status reconciliation between TM and JM [FLINK-17285] - Translate &quot;Python Table API&quot; page into Chinese [FLINK-17503] - Make memory configuration logging more user-friendly [FLINK-18598] - Add instructions for asynchronous execute in PyFlink doc [FLINK-18618] - Docker e2e tests are failing on CI [FLINK-18619] - Update training to use WatermarkStrategy [FLINK-18635] - Typo in &#39;concepts/timely stream processing&#39; part of the website [FLINK-18643] - Migrate Jenkins jobs to ci-builds.apache.org [FLINK-18644] - Remove obsolete doc for hive connector [FLINK-18730] - Remove Beta tag from SQL Client docs [FLINK-18772] - Hide submit job web ui elements when 
running in per-job/application mode [FLINK-18793] - Fix Typo for api.common.eventtime.WatermarkStrategy Description [FLINK-18797] - docs and examples use deprecated forms of keyBy [FLINK-18816] - Correct API usage in Pyflink Dependency Management page [FLINK-18831] - Improve the Python documentation about the operations in Table [FLINK-18839] - Add documentation about how to use catalog in Python Table API [FLINK-18847] - Add documentation about data types in Python Table API [FLINK-18849] - Improve the code tabs of the Flink documents [FLINK-18881] - Modify the Access Broken Link [FLINK-19055] - MemoryManagerSharedResourcesTest contains three tests running extraordinary long [FLINK-19105] - Table API Sample Code Error Task [FLINK-18666] - Update japicmp configuration for 1.11.1 [FLINK-18667] - Data Types documentation misunderstand users [FLINK-18678] - Hive connector fails to create vector orc reader if user specifies incorrect hive version `}),e.add({id:161,href:"/2020/09/04/flink-community-update-august20/",title:"Flink Community Update - August'20",section:"Flink Blog",content:`Ah, so much for a quiet August month. This time around, we bring you some new Flink Improvement Proposals (FLIPs), a preview of the upcoming Flink Stateful Functions 2.2 release and a look into how far Flink has come in comparison to 2019.
+Sub-task [FLINK-16087] - Translate &quot;Detecting Patterns&quot; page of &quot;Streaming Concepts&quot; into Chinese [FLINK-18264] - Translate the &quot;External Resource Framework&quot; page into Chinese [FLINK-18628] - Invalid error message for overloaded methods with same parameter name [FLINK-18801] - Add a &quot;10 minutes to Table API&quot; document under the &quot;Python API&quot; -&gt; &quot;User Guide&quot; -&gt; &quot;Table API&quot; section [FLINK-18910] - Create the new document structure for Python documentation according to FLIP-133 [FLINK-18912] - Add a Table API tutorial link(linked to try-flink/python_table_api.md) under the &quot;Python API&quot; -&gt; &quot;GettingStart&quot; -&gt; &quot;Tutorial&quot; section [FLINK-18913] - Add a &quot;TableEnvironment&quot; document under the &quot;Python API&quot; -&gt; &quot;User Guide&quot; -&gt; &quot;Table API&quot; section [FLINK-18917] - Add a &quot;Built-in Functions&quot; link (linked to dev/table/functions/systemFunctions.md) under the &quot;Python API&quot; -&gt; &quot;User Guide&quot; -&gt; &quot;Table API&quot; section [FLINK-19110] - Flatten current PyFlink documentation structure Bug [FLINK-14087] - throws java.lang.ArrayIndexOutOfBoundsException when emiting the data using RebalancePartitioner. 
[FLINK-15467] - Should wait for the end of the source thread during the Task cancellation [FLINK-16510] - Task manager safeguard shutdown may not be reliable [FLINK-16827] - StreamExecTemporalSort should require a distribution trait in StreamExecTemporalSortRule [FLINK-18081] - Fix broken links in &quot;Kerberos Authentication Setup and Configuration&quot; doc [FLINK-18212] - Init lookup join failed when use udf on lookup table [FLINK-18341] - Building Flink Walkthrough Table Java 0.1 COMPILATION ERROR [FLINK-18421] - Elasticsearch (v6.3.1) sink end-to-end test instable [FLINK-18468] - TaskExecutorITCase.testJobReExecutionAfterTaskExecutorTermination fails with DuplicateJobSubmissionException [FLINK-18552] - Update migration tests in master to cover migration from release-1.11 [FLINK-18581] - Cannot find GC cleaner with java version previous jdk8u72(-b01) [FLINK-18588] - hive ddl create table should support &#39;if not exists&#39; [FLINK-18595] - Deadlock during job shutdown [FLINK-18600] - Kerberized YARN per-job on Docker test failed to download JDK 8u251 [FLINK-18608] - CustomizedConvertRule#convertCast drops nullability [FLINK-18612] - WordCount example failure when setting relative output path [FLINK-18632] - RowData&#39;s row kind do not assigned from input row data when sink code generate and physical type info is pojo type [FLINK-18639] - Error messages from BashJavaUtils are eaten [FLINK-18641] - &quot;Failure to finalize checkpoint&quot; error in MasterTriggerRestoreHook [FLINK-18646] - Managed memory released check can block RPC thread [FLINK-18650] - The description of dispatcher in Flink Architecture document is not accurate [FLINK-18655] - Set failOnUnableToExtractRepoInfo to false for git-commit-id-plugin in module flink-runtime [FLINK-18656] - Start Delay metric is always zero for unaligned checkpoints [FLINK-18659] - FileNotFoundException when writing Hive orc tables [FLINK-18663] - RestServerEndpoint may prevent server shutdown [FLINK-18665] - 
Filesystem connector should use TableSchema exclude computed columns [FLINK-18672] - Fix Scala code examples for UDF type inference annotations [FLINK-18677] - ZooKeeperLeaderRetrievalService does not invalidate leader in case of SUSPENDED connection [FLINK-18682] - Vector orc reader cannot read Hive 2.0.0 table [FLINK-18697] - Adding flink-table-api-java-bridge_2.11 to a Flink job kills the IDE logging [FLINK-18700] - Debezium-json format throws Exception when PG table&#39;s IDENTITY config is not FULL [FLINK-18705] - Debezium-JSON throws NPE when tombstone message is received [FLINK-18708] - The links of the connector sql jar of Kafka 0.10 and 0.11 are extinct [FLINK-18710] - ResourceProfileInfo is not serializable [FLINK-18748] - Savepoint would be queued unexpected if pendingCheckpoints less than maxConcurrentCheckpoints [FLINK-18749] - Correct dependencies in Kubernetes pom [FLINK-18750] - SqlValidatorException thrown when select from a view which contains a UDTF call [FLINK-18769] - MiniBatch doesn&#39;t work with FLIP-95 source [FLINK-18821] - Netty client retry mechanism may cause PartitionRequestClientFactory#createPartitionRequestClient to wait infinitely [FLINK-18832] - BoundedBlockingSubpartition does not work with StreamTask [FLINK-18856] - CheckpointCoordinator ignores checkpointing.min-pause [FLINK-18859] - ExecutionGraphNotEnoughResourceTest.testRestartWithSlotSharingAndNotEnoughResources failed with &quot;Condition was not met in given timeout.&quot; [FLINK-18862] - Fix LISTAGG throws BinaryRawValueData cannot be cast to StringData exception in runtime [FLINK-18867] - Generic table stored in Hive catalog is incompatible between 1.10 and 1.11 [FLINK-18900] - HiveCatalog should error out when listing partitions with an invalid spec [FLINK-18902] - Cannot serve results of asynchronous REST operations in per-job mode [FLINK-18941] - There are some typos in &quot;Set up JobManager Memory&quot; [FLINK-18942] - HiveTableSink shouldn&#39;t try to create 
BulkWriter factory when using MR writer [FLINK-18956] - StreamTask.invoke should catch Throwable instead of Exception [FLINK-18959] - Fail to archiveExecutionGraph because job is not finished when dispatcher close [FLINK-18992] - Table API renameColumns method annotation error [FLINK-18993] - Invoke sanityCheckTotalFlinkMemory method incorrectly in JobManagerFlinkMemoryUtils.java [FLINK-18994] - There is one typo in &quot;Set up TaskManager Memory&quot; [FLINK-19040] - SourceOperator is not closing SourceReader [FLINK-19061] - HiveCatalog fails to get partition column stats if partition value contains special characters [FLINK-19094] - Revise the description of watermark strategy in Flink Table document [FLINK-19108] - Stop expanding the identifiers with scope aliased by the system with &#39;EXPR$&#39; prefix [FLINK-19109] - Split Reader eats chained periodic watermarks [FLINK-19121] - Avoid accessing HDFS frequently in HiveBulkWriterFactory [FLINK-19133] - User provided kafka partitioners are not initialized correctly [FLINK-19148] - Table crashed in Flink Table API &amp; SQL Docs [FLINK-19166] - StreamingFileWriter should register Listener before the initialization of buckets Improvement [FLINK-16619] - Misleading SlotManagerImpl logging for slot reports of unknown task manager [FLINK-17075] - Add task status reconciliation between TM and JM [FLINK-17285] - Translate &quot;Python Table API&quot; page into Chinese [FLINK-17503] - Make memory configuration logging more user-friendly [FLINK-18598] - Add instructions for asynchronous execute in PyFlink doc [FLINK-18618] - Docker e2e tests are failing on CI [FLINK-18619] - Update training to use WatermarkStrategy [FLINK-18635] - Typo in &#39;concepts/timely stream processing&#39; part of the website [FLINK-18643] - Migrate Jenkins jobs to ci-builds.apache.org [FLINK-18644] - Remove obsolete doc for hive connector [FLINK-18730] - Remove Beta tag from SQL Client docs [FLINK-18772] - Hide submit job web ui elements when 
running in per-job/application mode [FLINK-18793] - Fix Typo for api.common.eventtime.WatermarkStrategy Description [FLINK-18797] - docs and examples use deprecated forms of keyBy [FLINK-18816] - Correct API usage in Pyflink Dependency Management page [FLINK-18831] - Improve the Python documentation about the operations in Table [FLINK-18839] - Add documentation about how to use catalog in Python Table API [FLINK-18847] - Add documentation about data types in Python Table API [FLINK-18849] - Improve the code tabs of the Flink documents [FLINK-18881] - Modify the Access Broken Link [FLINK-19055] - MemoryManagerSharedResourcesTest contains three tests running extraordinary long [FLINK-19105] - Table API Sample Code Error Task [FLINK-18666] - Update japicmp configuration for 1.11.1 [FLINK-18667] - Data Types documentation misunderstand users [FLINK-18678] - Hive connector fails to create vector orc reader if user specifies incorrect hive version `}),e.add({id:162,href:"/2020/09/04/flink-community-update-august20/",title:"Flink Community Update - August'20",section:"Flink Blog",content:`Ah, so much for a quiet August month. This time around, we bring you some new Flink Improvement Proposals (FLIPs), a preview of the upcoming Flink Stateful Functions 2.2 release and a look into how far Flink has come in comparison to 2019.
 The Past Month in Flink # Flink Releases # Getting Ready for Flink Stateful Functions 2.2 # The details of the next release of Stateful Functions are under discussion in this @dev mailing list thread, and the feature freeze is set for September 10th — so, you can expect Stateful Functions 2.2 to be released soon after! Some of the most relevant features in the upcoming release are:
 DataStream API interoperability, allowing users to embed Stateful Functions pipelines in regular DataStream API programs with DataStream ingress/egress.
 Fine-grained control over state for remote functions, including the ability to configure different state expiration modes for each individual function.
@@ -3039,7 +3049,7 @@
 Real-Time Stock Processing With Apache NiFi, Apache Flink and Apache Kafka
 Using the Mm FLaNK Stack for Edge AI (Apache MXNet, Apache Flink, Apache NiFi, Apache Kafka, Apache Kudu)
 Blogposts Flink 1.11 Series The State of Flink on Docker Accelerating your workload with GPU and other external resources PyFlink: The integration of Pandas into PyFlink Other Monitoring and Controlling Networks of IoT Devices with Flink Stateful Functions Advanced Flink Application Patterns Vol.3: Custom Window Processing Flink Packages Flink Packages is a website where you can explore (and contribute to) the Flink ecosystem of connectors, extensions, APIs, tools and integrations. New in: Flink CDC Connectors Flink File Source Flink DynamoDB Connector If you’d like to keep a closer eye on what’s happening in the community, subscribe to the Flink @community mailing list to get fine-grained weekly updates, upcoming event announcements and more.
-`}),e.add({id:162,href:"/2020/09/01/memory-management-improvements-for-flinks-jobmanager-in-apache-flink-1.11/",title:"Memory Management improvements for Flink’s JobManager in Apache Flink 1.11",section:"Flink Blog",content:`Apache Flink 1.11 comes with significant changes to the memory model of Flink’s JobManager and configuration options for your Flink clusters. These recently-introduced changes make Flink adaptable to all kinds of deployment environments (e.g. Kubernetes, Yarn, Mesos), providing better control over its memory consumption.
+`}),e.add({id:163,href:"/2020/09/01/memory-management-improvements-for-flinks-jobmanager-in-apache-flink-1.11/",title:"Memory Management improvements for Flink’s JobManager in Apache Flink 1.11",section:"Flink Blog",content:`Apache Flink 1.11 comes with significant changes to the memory model of Flink’s JobManager and configuration options for your Flink clusters. These recently-introduced changes make Flink adaptable to all kinds of deployment environments (e.g. Kubernetes, Yarn, Mesos), providing better control over its memory consumption.
 The previous blog post focused on the memory model of the TaskManagers and how it was improved in Flink 1.10. This post addresses the same topic but for the JobManager instead. Flink 1.11 unifies the memory model of Flink’s processes. The newly-introduced memory model of the JobManager follows a similar approach to that of the TaskManagers; it is simpler and has fewer components and tuning knobs. This post might consequently seem very similar to our previous story on Flink’s memory but aims at providing a complete overview of Flink’s JobManager memory model as of Flink 1.11. Read on for a full list of updates and changes below!
 Introduction to Flink’s process memory model # Having a clear understanding of Apache Flink’s process memory model allows you to manage resources for the various workloads more efficiently. The following diagram illustrates the main memory components of a Flink process:
 Flink: Total Process Memory The JobManager process is a JVM process. On a high level, its memory consists of the JVM Heap and Off-Heap memory. These types of memory are consumed by Flink directly or by the JVM for its specific purposes (i.e. metaspace). There are two major memory consumers within the JobManager process: the framework itself consuming memory for internal data structures, network communication, etc. and the user code which runs within the JobManager process, e.g. in certain batch sources or during checkpoint completion callbacks.
@@ -3055,7 +3065,7 @@
 JVM metaspace requires additional memory. If you encounter OutOfMemoryError: Metaspace, Flink provides an option to increase its default limit and the JVM will ensure that it is not exceeded. The metaspace size of a Flink JVM process is always explicitly set in contrast to the default JVM settings where it is not limited.
 JVM requires more internal memory. There is no direct control over certain types of JVM process allocations but Flink provides JVM Overhead options. The JVM Overhead options allow declaring an additional amount of memory, anticipated for those allocations and not covered by other options.
 Conclusion # The latest Flink release (Flink 1.11) introduces some notable changes to the memory configuration of Flink’s JobManager, making its memory management significantly easier than before. Stay tuned for more additions and features in upcoming releases. If you have any suggestions or questions for the Flink community, we encourage you to sign up to the Apache Flink mailing lists and become part of the discussion.
-`}),e.add({id:163,href:"/2020/08/25/apache-flink-1.10.2-released/",title:"Apache Flink 1.10.2 Released",section:"Flink Blog",content:"The Apache Flink community released the second bugfix version of the Apache Flink 1.10 series.\nThis release includes 73 fixes and minor improvements for Flink 1.10.1. The list below includes a detailed list of all fixes and improvements.\nWe highly recommend all users to upgrade to Flink 1.10.2.\nNote After FLINK-18242, the deprecated `OptionsFactory` and `ConfigurableOptionsFactory` classes are removed (not applicable for release-1.10), please use `RocksDBOptionsFactory` and `ConfigurableRocksDBOptionsFactory` instead. Please also recompile your application codes if any class extending `DefaultConfigurableOptionsFactory` Note After FLINK-17800 by default we will set `setTotalOrderSeek` to true for RocksDB's `ReadOptions`, to prevent user from miss using `optimizeForPointLookup`. Meantime we support customizing `ReadOptions` through `RocksDBOptionsFactory`. Please set `setTotalOrderSeek` back to false if any performance regression observed (normally won't happen according to our testing). 
Updated Maven dependencies:\n&lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.10.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.10.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.10.2&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.\nList of resolved issues:\nSub-task [FLINK-15836] - Throw fatal error in KubernetesResourceManager when the pods watcher is closed with exception [FLINK-16160] - Schema#proctime and Schema#rowtime don&#39;t work in TableEnvironment#connect code path Bug [FLINK-13689] - Rest High Level Client for Elasticsearch6.x connector leaks threads if no connection could be established [FLINK-14369] - KafkaProducerAtLeastOnceITCase&gt;KafkaProducerTestBase.testOneToOneAtLeastOnceCustomOperator fails on Travis [FLINK-14836] - Unable to set yarn container number for scala shell in yarn mode [FLINK-14894] - HybridOffHeapUnsafeMemorySegmentTest#testByteBufferWrap failed on Travis [FLINK-15758] - Investigate potential out-of-memory problems due to managed unsafe memory allocation [FLINK-15849] - Update SQL-CLIENT document from type to data-type [FLINK-16309] - ElasticSearch 7 connector is missing in SQL connector list [FLINK-16346] - BlobsCleanupITCase.testBlobServerCleanupCancelledJob fails on Travis [FLINK-16432] - Building Hive connector gives problems [FLINK-16451] - Fix IndexOutOfBoundsException for DISTINCT AGG with constants [FLINK-16510] - Task manager safeguard shutdown may not be reliable [FLINK-17092] - Pyflink test BlinkStreamDependencyTests is instable [FLINK-17322] - Enable latency tracker would corrupt the broadcast state [FLINK-17420] - Cannot alias 
Tuple and Row fields when converting DataStream to Table [FLINK-17466] - toRetractStream doesn&#39;t work correctly with Pojo conversion class [FLINK-17555] - docstring of pyflink.table.descriptors.FileSystem:1:duplicate object description of pyflink.table.descriptors.FileSystem [FLINK-17558] - Partitions are released in TaskExecutor Main Thread [FLINK-17562] - POST /jars/:jarid/plan is not working [FLINK-17578] - Union of 2 SideOutputs behaviour incorrect [FLINK-17639] - Document which FileSystems are supported by the StreamingFileSink [FLINK-17643] - LaunchCoordinatorTest fails [FLINK-17700] - The callback client of JavaGatewayServer should run in a daemon thread [FLINK-17744] - StreamContextEnvironment#execute cannot be call JobListener#onJobExecuted [FLINK-17763] - No log files when starting scala-shell [FLINK-17788] - scala shell in yarn mode is broken [FLINK-17800] - RocksDB optimizeForPointLookup results in missing time windows [FLINK-17801] - TaskExecutorTest.testHeartbeatTimeoutWithResourceManager timeout [FLINK-17809] - BashJavaUtil script logic does not work for paths with spaces [FLINK-17822] - Nightly Flink CLI end-to-end test failed with &quot;JavaGcCleanerWrapper$PendingCleanersRunner cannot access class jdk.internal.misc.SharedSecrets&quot; in Java 11 [FLINK-17870] - dependent jars are missing to be shipped to cluster in scala shell [FLINK-17891] - FlinkYarnSessionCli sets wrong execution.target type [FLINK-17959] - Exception: &quot;CANCELLED: call already cancelled&quot; is thrown when run python udf [FLINK-18008] - HistoryServer does not log environment information on startup [FLINK-18012] - Deactivate slot timeout if TaskSlotTable.tryMarkSlotActive is called [FLINK-18035] - Executors#newCachedThreadPool could not work as expected [FLINK-18045] - Fix Kerberos credentials checking to unblock Flink on secured MapR [FLINK-18048] - &quot;--host&quot; option could not take effect for standalone application cluster [FLINK-18097] - History server 
doesn&#39;t clean all job json files [FLINK-18168] - Error results when use UDAF with Object Array return type [FLINK-18223] - AvroSerializer does not correctly instantiate GenericRecord [FLINK-18241] - Custom OptionsFactory in user code not working when configured via flink-conf.yaml [FLINK-18242] - Custom OptionsFactory settings seem to have no effect on RocksDB [FLINK-18297] - SQL client: setting execution.type to invalid value shuts down the session [FLINK-18329] - Dist NOTICE issues [FLINK-18352] - org.apache.flink.core.execution.DefaultExecutorServiceLoader not thread safe [FLINK-18517] - kubernetes session test failed with &quot;java.net.SocketException: Broken pipe&quot; [FLINK-18539] - StreamExecutionEnvironment#addSource(SourceFunction, TypeInformation) doesn&#39;t use the user defined type information [FLINK-18595] - Deadlock during job shutdown [FLINK-18646] - Managed memory released check can block RPC thread [FLINK-18663] - RestServerEndpoint may prevent server shutdown [FLINK-18677] - ZooKeeperLeaderRetrievalService does not invalidate leader in case of SUSPENDED connection [FLINK-18702] - Flink elasticsearch connector leaks threads and classloaders thereof [FLINK-18815] - AbstractCloseableRegistryTest.testClose unstable [FLINK-18821] - Netty client retry mechanism may cause PartitionRequestClientFactory#createPartitionRequestClient to wait infinitely [FLINK-18859] - ExecutionGraphNotEnoughResourceTest.testRestartWithSlotSharingAndNotEnoughResources failed with &quot;Condition was not met in given timeout.&quot; [FLINK-18902] - Cannot serve results of asynchronous REST operations in per-job mode New Feature [FLINK-17844] - Activate japicmp-maven-plugin checks for @PublicEvolving between bug fix releases (x.y.u -&gt; x.y.v) Improvement [FLINK-16217] - SQL Client crashed when any uncatched exception is thrown [FLINK-16225] - Metaspace Out Of Memory should be handled as Fatal Error in TaskManager [FLINK-16619] - Misleading SlotManagerImpl logging for 
slot reports of unknown task manager [FLINK-16717] - Use headless service for rpc and blob port when flink on K8S [FLINK-17248] - Make the thread nums of io executor of ClusterEntrypoint and MiniCluster configurable [FLINK-17503] - Make memory configuration logging more user-friendly [FLINK-17819] - Yarn error unhelpful when forgetting HADOOP_CLASSPATH [FLINK-17920] - Add the Python example of Interval Join in Table API doc [FLINK-17945] - Improve error reporting of Python CI tests [FLINK-17970] - Increase default value of IO pool executor to 4 * #cores [FLINK-18010] - Add more logging to HistoryServer [FLINK-18501] - Mapping of Pluggable Filesystems to scheme is not properly logged [FLINK-18644] - Remove obsolete doc for hive connector [FLINK-18772] - Hide submit job web ui elements when running in per-job/application mode "}),e.add({id:164,href:"/2020/08/20/the-state-of-flink-on-docker/",title:"The State of Flink on Docker",section:"Flink Blog",content:`With over 50 million downloads from Docker Hub, the Flink docker images are a very popular deployment option.
+`}),e.add({id:164,href:"/2020/08/25/apache-flink-1.10.2-released/",title:"Apache Flink 1.10.2 Released",section:"Flink Blog",content:"The Apache Flink community released the second bugfix version of the Apache Flink 1.10 series.\nThis release includes 73 fixes and minor improvements for Flink 1.10.1. The list below includes a detailed list of all fixes and improvements.\nWe highly recommend all users to upgrade to Flink 1.10.2.\nNote After FLINK-18242, the deprecated `OptionsFactory` and `ConfigurableOptionsFactory` classes are removed (not applicable for release-1.10), please use `RocksDBOptionsFactory` and `ConfigurableRocksDBOptionsFactory` instead. Please also recompile your application codes if any class extending `DefaultConfigurableOptionsFactory` Note After FLINK-17800 by default we will set `setTotalOrderSeek` to true for RocksDB's `ReadOptions`, to prevent user from miss using `optimizeForPointLookup`. Meantime we support customizing `ReadOptions` through `RocksDBOptionsFactory`. Please set `setTotalOrderSeek` back to false if any performance regression observed (normally won't happen according to our testing). 
Updated Maven dependencies:\n&lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.10.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.10.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.10.2&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.\nList of resolved issues:\nSub-task [FLINK-15836] - Throw fatal error in KubernetesResourceManager when the pods watcher is closed with exception [FLINK-16160] - Schema#proctime and Schema#rowtime don&#39;t work in TableEnvironment#connect code path Bug [FLINK-13689] - Rest High Level Client for Elasticsearch6.x connector leaks threads if no connection could be established [FLINK-14369] - KafkaProducerAtLeastOnceITCase&gt;KafkaProducerTestBase.testOneToOneAtLeastOnceCustomOperator fails on Travis [FLINK-14836] - Unable to set yarn container number for scala shell in yarn mode [FLINK-14894] - HybridOffHeapUnsafeMemorySegmentTest#testByteBufferWrap failed on Travis [FLINK-15758] - Investigate potential out-of-memory problems due to managed unsafe memory allocation [FLINK-15849] - Update SQL-CLIENT document from type to data-type [FLINK-16309] - ElasticSearch 7 connector is missing in SQL connector list [FLINK-16346] - BlobsCleanupITCase.testBlobServerCleanupCancelledJob fails on Travis [FLINK-16432] - Building Hive connector gives problems [FLINK-16451] - Fix IndexOutOfBoundsException for DISTINCT AGG with constants [FLINK-16510] - Task manager safeguard shutdown may not be reliable [FLINK-17092] - Pyflink test BlinkStreamDependencyTests is instable [FLINK-17322] - Enable latency tracker would corrupt the broadcast state [FLINK-17420] - Cannot alias 
Tuple and Row fields when converting DataStream to Table [FLINK-17466] - toRetractStream doesn&#39;t work correctly with Pojo conversion class [FLINK-17555] - docstring of pyflink.table.descriptors.FileSystem:1:duplicate object description of pyflink.table.descriptors.FileSystem [FLINK-17558] - Partitions are released in TaskExecutor Main Thread [FLINK-17562] - POST /jars/:jarid/plan is not working [FLINK-17578] - Union of 2 SideOutputs behaviour incorrect [FLINK-17639] - Document which FileSystems are supported by the StreamingFileSink [FLINK-17643] - LaunchCoordinatorTest fails [FLINK-17700] - The callback client of JavaGatewayServer should run in a daemon thread [FLINK-17744] - StreamContextEnvironment#execute cannot be call JobListener#onJobExecuted [FLINK-17763] - No log files when starting scala-shell [FLINK-17788] - scala shell in yarn mode is broken [FLINK-17800] - RocksDB optimizeForPointLookup results in missing time windows [FLINK-17801] - TaskExecutorTest.testHeartbeatTimeoutWithResourceManager timeout [FLINK-17809] - BashJavaUtil script logic does not work for paths with spaces [FLINK-17822] - Nightly Flink CLI end-to-end test failed with &quot;JavaGcCleanerWrapper$PendingCleanersRunner cannot access class jdk.internal.misc.SharedSecrets&quot; in Java 11 [FLINK-17870] - dependent jars are missing to be shipped to cluster in scala shell [FLINK-17891] - FlinkYarnSessionCli sets wrong execution.target type [FLINK-17959] - Exception: &quot;CANCELLED: call already cancelled&quot; is thrown when run python udf [FLINK-18008] - HistoryServer does not log environment information on startup [FLINK-18012] - Deactivate slot timeout if TaskSlotTable.tryMarkSlotActive is called [FLINK-18035] - Executors#newCachedThreadPool could not work as expected [FLINK-18045] - Fix Kerberos credentials checking to unblock Flink on secured MapR [FLINK-18048] - &quot;--host&quot; option could not take effect for standalone application cluster [FLINK-18097] - History server 
doesn&#39;t clean all job json files [FLINK-18168] - Error results when use UDAF with Object Array return type [FLINK-18223] - AvroSerializer does not correctly instantiate GenericRecord [FLINK-18241] - Custom OptionsFactory in user code not working when configured via flink-conf.yaml [FLINK-18242] - Custom OptionsFactory settings seem to have no effect on RocksDB [FLINK-18297] - SQL client: setting execution.type to invalid value shuts down the session [FLINK-18329] - Dist NOTICE issues [FLINK-18352] - org.apache.flink.core.execution.DefaultExecutorServiceLoader not thread safe [FLINK-18517] - kubernetes session test failed with &quot;java.net.SocketException: Broken pipe&quot; [FLINK-18539] - StreamExecutionEnvironment#addSource(SourceFunction, TypeInformation) doesn&#39;t use the user defined type information [FLINK-18595] - Deadlock during job shutdown [FLINK-18646] - Managed memory released check can block RPC thread [FLINK-18663] - RestServerEndpoint may prevent server shutdown [FLINK-18677] - ZooKeeperLeaderRetrievalService does not invalidate leader in case of SUSPENDED connection [FLINK-18702] - Flink elasticsearch connector leaks threads and classloaders thereof [FLINK-18815] - AbstractCloseableRegistryTest.testClose unstable [FLINK-18821] - Netty client retry mechanism may cause PartitionRequestClientFactory#createPartitionRequestClient to wait infinitely [FLINK-18859] - ExecutionGraphNotEnoughResourceTest.testRestartWithSlotSharingAndNotEnoughResources failed with &quot;Condition was not met in given timeout.&quot; [FLINK-18902] - Cannot serve results of asynchronous REST operations in per-job mode New Feature [FLINK-17844] - Activate japicmp-maven-plugin checks for @PublicEvolving between bug fix releases (x.y.u -&gt; x.y.v) Improvement [FLINK-16217] - SQL Client crashed when any uncatched exception is thrown [FLINK-16225] - Metaspace Out Of Memory should be handled as Fatal Error in TaskManager [FLINK-16619] - Misleading SlotManagerImpl logging for 
slot reports of unknown task manager [FLINK-16717] - Use headless service for rpc and blob port when flink on K8S [FLINK-17248] - Make the thread nums of io executor of ClusterEntrypoint and MiniCluster configurable [FLINK-17503] - Make memory configuration logging more user-friendly [FLINK-17819] - Yarn error unhelpful when forgetting HADOOP_CLASSPATH [FLINK-17920] - Add the Python example of Interval Join in Table API doc [FLINK-17945] - Improve error reporting of Python CI tests [FLINK-17970] - Increase default value of IO pool executor to 4 * #cores [FLINK-18010] - Add more logging to HistoryServer [FLINK-18501] - Mapping of Pluggable Filesystems to scheme is not properly logged [FLINK-18644] - Remove obsolete doc for hive connector [FLINK-18772] - Hide submit job web ui elements when running in per-job/application mode "}),e.add({id:165,href:"/2020/08/20/the-state-of-flink-on-docker/",title:"The State of Flink on Docker",section:"Flink Blog",content:`With over 50 million downloads from Docker Hub, the Flink docker images are a very popular deployment option.
 The Flink community recently put some effort into improving the Docker experience for our users with the goal to reduce confusion and improve usability.
 Let&rsquo;s quickly break down the recent improvements:
 Reduce confusion: Flink used to have 2 Dockerfiles and a 3rd file maintained outside of the official repository — all with different features and varying stability. Now, we have one central place for all images: apache/flink-docker.
@@ -3073,7 +3083,7 @@
 # 1: (optional) Download the Flink distribution, and unpack it wget https://archive.apache.org/dist/flink/flink-1.11.1/flink-1.11.1-bin-scala_2.12.tgz tar xf flink-1.11.1-bin-scala_2.12.tgz cd flink-1.11.1 # 2: Start the Flink job ./bin/flink run ./examples/streaming/TopSpeedWindowing.jar The main steps of the tutorial are also recorded in this short screencast:
 Next steps: Now that you&rsquo;ve successfully completed this tutorial, we recommend you checking out the full Flink on Docker documentation for implementing more advanced deployment scenarios, such as Job Clusters, Docker Compose or our native Kubernetes integration.
 Conclusion # We encourage all readers to try out Flink on Docker to provide the community with feedback to further improve the experience. Please refer to the user@flink.apache.org (remember to subscribe first) for general questions and our issue tracker for specific bugs or improvements, or ideas for contributions!
-`}),e.add({id:165,href:"/2020/08/18/monitoring-and-controlling-networks-of-iot-devices-with-flink-stateful-functions/",title:"Monitoring and Controlling Networks of IoT Devices with Flink Stateful Functions",section:"Flink Blog",content:`In this blog post, we&rsquo;ll take a look at a class of use cases that is a natural fit for Flink Stateful Functions: monitoring and controlling networks of connected devices (often called the “Internet of Things” (IoT)).
+`}),e.add({id:166,href:"/2020/08/18/monitoring-and-controlling-networks-of-iot-devices-with-flink-stateful-functions/",title:"Monitoring and Controlling Networks of IoT Devices with Flink Stateful Functions",section:"Flink Blog",content:`In this blog post, we&rsquo;ll take a look at a class of use cases that is a natural fit for Flink Stateful Functions: monitoring and controlling networks of connected devices (often called the “Internet of Things” (IoT)).
 IoT networks are composed of many individual, but interconnected components, which makes getting some kind of high-level insight into the status, problems, or optimization opportunities in these networks not trivial. Each individual device “sees” only its own state, which means that the status of groups of devices, or even the network as a whole, is often a complex aggregation of the individual devices’ state. Diagnosing, controlling, or optimizing these groups of devices thus requires distributed logic that analyzes the &ldquo;bigger picture&rdquo; and then acts upon it.
 A powerful approach to implement this is using digital twins: each device has a corresponding virtual entity (i.e. the digital twin), which also captures their relationships and interactions. The digital twins track the status of their corresponding devices and send updates to other twins, representing groups (such as geographical regions) of devices. Those, in turn, handle the logic to obtain the network&rsquo;s aggregated view, or this &ldquo;bigger picture&rdquo; we mentioned before.
 Our Scenario: Datacenter Monitoring and Alerting # Fig.1 An oversimplified view of a data center. There are many examples of the digital twins approach in the real world, such as smart grids of batteries, smart cities, or monitoring infrastructure software clusters. In this blogpost, we&rsquo;ll use the example of data center monitoring and alert correlation implemented with Stateful Functions.
@@ -3108,7 +3118,7 @@
 How does it really look? # ServerFun # This section associates a behaviour for every message that the function expects to be invoked with. The metricsHistory buffer is our sliding window of the last 15 minutes worth of ServerMetricReports. Note that this buffer is configured to expire entries 15 minutes after they were written. serverHealthState represents the current physical server state, open incidents and so on. Let&rsquo;s take a look at what happens when a ServerMetricReport message arrives:
 Retrieve the previously computed serverHealthState that is kept in state. Evaluate a model on the sliding window of the previous metric reports + the current metric reported + the previously computed server state to obtain an assessment of the current server health. If the server is not believed to be healthy, emit an alert via an alerts topic, and also send a message to our containing rack with all the open incidents that this server currently has. We'll omit the other handlers for brevity, but it's important to mention that onTimer makes sure that metric reports are coming in periodically, otherwise it'd trigger an alert stating that we didn’t hear from that server for a long time. RackFun # This function keeps a mapping between a ServerId and a set of open incidents on that server. When new alerts are received, this function tries to correlate the alert with any other open alerts on that rack. If a correlated rack alert is present, this function notifies the DataCenterFun about it. DataCenterFun # A persisted mapping between a RackId and the latest alert that rack reported. Throughout the usage of ingress/egress pairs, this function can report back its current view of the world of what racks are currently known to be unhealthy. An operator (via a front-end) can send a GetUnhealthyRacks message addressed to that DataCenterFun, and wait for the corresponding response message(UnhealthyRacks). Whenever a rack reports OK, it&rsquo;ll be removed from the unhealthy racks map. Conclusion # This pattern — where each layer of functions performs a stateful aggregation of events sent from the previous layer (or the input) — is useful for a whole class of problems. And, although we used connected devices to motivate this use case, it&rsquo;s not limited to the IoT domain.
 Stateful Functions provides the building blocks necessary for building complex distributed applications (here the digital twins that support analysis and interactions of the physical entities), while removing common complexities of distributed systems like service discovery, retires, circuit breakers, state management, scalability and similar challenges. If you&rsquo;d like to learn more about Stateful Functions, head over to the official documentation, where you can also find more hands-on tutorials to try out yourself!
-`}),e.add({id:166,href:"/2020/08/06/accelerating-your-workload-with-gpu-and-other-external-resources/",title:"Accelerating your workload with GPU and other external resources",section:"Flink Blog",content:`Apache Flink 1.11 introduces a new External Resource Framework, which allows you to request external resources from the underlying resource management systems (e.g., Kubernetes) and accelerate your workload with those resources. As Flink provides a first-party GPU plugin at the moment, we will take GPU as an example and show how it affects Flink applications in the AI field. Other external resources (e.g. RDMA and SSD) can also be supported in a pluggable manner.
+`}),e.add({id:167,href:"/2020/08/06/accelerating-your-workload-with-gpu-and-other-external-resources/",title:"Accelerating your workload with GPU and other external resources",section:"Flink Blog",content:`Apache Flink 1.11 introduces a new External Resource Framework, which allows you to request external resources from the underlying resource management systems (e.g., Kubernetes) and accelerate your workload with those resources. As Flink provides a first-party GPU plugin at the moment, we will take GPU as an example and show how it affects Flink applications in the AI field. Other external resources (e.g. RDMA and SSD) can also be supported in a pluggable manner.
 End-to-end real-time AI with GPU # Recently, AI and Machine Learning have gained additional popularity and have been widely used in various scenarios, such as personalized recommendation and image recognition. Flink, with the ability to support GPU allocation, can be used to build an end-to-end real-time AI workflow.
 Why Flink # Typical AI workloads fall into two categories: training and inference.
 Typical AI Workflow The training workload is usually a batch task, in which we train a model from a bounded dataset. On the other hand, the inference workload tends to be a streaming job. It consumes an unbounded data stream, which contains image data, for example, and uses a model to produce the output of predictions. Both workloads need to do data preprocessing first. Flink, as a unified batch and stream processing engine, can be used to build an end-to-end AI workflow naturally.
@@ -3125,7 +3135,7 @@
 With the external resource framework, you only need to implement a plugin that enables the operator to get the information for these external resources; see Custom Plugin for more details. If you just want to ensure that an external resource exists in the TaskManager, then you only need to find the configuration key of that resource in the underlying resource management system and configure the external resource framework accordingly.
 Conclusion # In the latest Flink release (Flink 1.11), an external resource framework has been introduced to support requesting various types of resources from the underlying resource management systems, and supply all the necessary information for using these resources to the operators. The first-party GPU plugin expands the application prospects of Flink in the AI domain. Different resource types can be supported in a pluggable way. You can also implement your own plugins for custom resource types.
 Future developments in this area include implementing operator level resource isolation and fine-grained external resource scheduling. The community may kick this work off once FLIP-56 is finished. If you have any suggestions or questions for the community, we encourage you to sign up to the Apache Flink mailing lists and join the discussion there.
-`}),e.add({id:167,href:"/2020/08/04/pyflink-the-integration-of-pandas-into-pyflink/",title:"PyFlink: The integration of Pandas into PyFlink",section:"Flink Blog",content:`Python has evolved into one of the most important programming languages for many fields of data processing. So big has been Python’s popularity, that it has pretty much become the default data processing language for data scientists. On top of that, there is a plethora of Python-based data processing tools such as NumPy, Pandas, and Scikit-learn that have gained additional popularity due to their flexibility or powerful functionalities.
+`}),e.add({id:168,href:"/2020/08/04/pyflink-the-integration-of-pandas-into-pyflink/",title:"PyFlink: The integration of Pandas into PyFlink",section:"Flink Blog",content:`Python has evolved into one of the most important programming languages for many fields of data processing. So big has been Python’s popularity, that it has pretty much become the default data processing language for data scientists. On top of that, there is a plethora of Python-based data processing tools such as NumPy, Pandas, and Scikit-learn that have gained additional popularity due to their flexibility or powerful functionalities.
 Pic source: VanderPlas 2017, slide 52. In an effort to meet the user needs and demands, the Flink community hopes to leverage and make better use of these tools. Along this direction, the Flink community put some great effort in integrating Pandas into PyFlink with the latest Flink version 1.11. Some of the added features include support for Pandas UDF and the conversion between Pandas DataFrame and Table. Pandas UDF not only greatly improve the execution performance of Python UDF, but also make it more convenient for users to leverage libraries such as Pandas and NumPy in Python UDF. Additionally, providing support for the conversion between Pandas DataFrame and Table enables users to switch processing engines seamlessly without the need for an intermediate connector. In the remainder of this article, we will introduce how these functionalities work and how to use them with a step-by-step example.
 Note Currently, only Scalar Pandas UDFs are supported in PyFlink. Pandas UDF in Flink 1.11 # Using scalar Python UDF was already possible in Flink 1.10 as described in a previous article on the Flink blog. Scalar Python UDFs work based on three primary steps:
 the Java operator serializes one input row to bytes and sends them to the Python worker;
@@ -3145,7 +3155,7 @@
 Finally, you can see the execution result on the command line. As you can see, all the temperature data with an empty value has been interpolated: $ cat /tmp/output 1,98.0 1,99.0 1,100.0 2,99.0 Conversion between PyFlink Table and Pandas DataFrame # You can use the from_pandas() method to create a PyFlink Table from a Pandas DataFrame or use the to_pandas() method to convert a PyFlink Table to a Pandas DataFrame.
 from pyflink.datastream import StreamExecutionEnvironment from pyflink.table import StreamTableEnvironment import pandas as pd import numpy as np env = StreamExecutionEnvironment.get_execution_environment() t_env = StreamTableEnvironment.create(env) # Create a PyFlink Table pdf = pd.DataFrame(np.random.rand(1000, 2)) table = t_env.from_pandas(pdf, [&#34;a&#34;, &#34;b&#34;]).filter(&#34;a &gt; 0.5&#34;) # Convert the PyFlink Table to a Pandas DataFrame pdf = table.to_pandas() print(pdf) Conclusion &amp; Upcoming work # In this article, we introduce the integration of Pandas in Flink 1.11, including Pandas UDF and the conversion between Table and Pandas. In fact, in the latest Apache Flink release, there are many excellent features added to PyFlink, such as support of User-defined Table functions and User-defined Metrics for Python UDFs. What’s more, from Flink 1.11, you can build PyFlink with Cython support and &ldquo;Cythonize&rdquo; your Python UDFs to substantially improve code execution speed (up to 30x faster, compared to Python UDFs in Flink 1.10).
 Future work by the community will focus on adding more features and bringing additional optimizations with follow up releases. Such optimizations and additions include a Python DataStream API and more integration with the Python ecosystem, such as support for distributed Pandas in Flink. Stay tuned for more information and updates with the upcoming releases!
-`}),e.add({id:168,href:"/2020/07/30/advanced-flink-application-patterns-vol.3-custom-window-processing/",title:"Advanced Flink Application Patterns Vol.3: Custom Window Processing",section:"Flink Blog",content:` Introduction # In the previous articles of the series, we described how you can achieve flexible stream partitioning based on dynamically-updated configurations (a set of fraud-detection rules) and how you can utilize Flink's Broadcast mechanism to distribute processing configuration at runtime among the relevant operators. Following up directly where we left the discussion of the end-to-end solution last time, in this article we will describe how you can use the &quot;Swiss knife&quot; of Flink - the Process Function to create an implementation that is tailor-made to match your streaming business logic requirements. Our discussion will continue in the context of the Fraud Detection engine. We will also demonstrate how you can implement your own custom replacement for time windows for cases where the out-of-the-box windowing available from the DataStream API does not satisfy your requirements. In particular, we will look at the trade-offs that you can make when designing a solution which requires low-latency reactions to individual events.
+`}),e.add({id:169,href:"/2020/07/30/advanced-flink-application-patterns-vol.3-custom-window-processing/",title:"Advanced Flink Application Patterns Vol.3: Custom Window Processing",section:"Flink Blog",content:` Introduction # In the previous articles of the series, we described how you can achieve flexible stream partitioning based on dynamically-updated configurations (a set of fraud-detection rules) and how you can utilize Flink's Broadcast mechanism to distribute processing configuration at runtime among the relevant operators. Following up directly where we left the discussion of the end-to-end solution last time, in this article we will describe how you can use the &quot;Swiss knife&quot; of Flink - the Process Function to create an implementation that is tailor-made to match your streaming business logic requirements. Our discussion will continue in the context of the Fraud Detection engine. We will also demonstrate how you can implement your own custom replacement for time windows for cases where the out-of-the-box windowing available from the DataStream API does not satisfy your requirements. In particular, we will look at the trade-offs that you can make when designing a solution which requires low-latency reactions to individual events.
 This article will describe some high-level concepts that can be applied independently, but it is recommended that you review the material in part one and part two of the series as well as checkout the code base in order to make it easier to follow along.
 ProcessFunction as a &ldquo;Window&rdquo; # Low Latency # Let&rsquo;s start with a reminder of the type of fraud detection rule that we would like to support:
 &ldquo;Whenever the sum of payments from the same payer to the same beneficiary within a 24 hour period is greater than 200 000 $ - trigger an alert.&rdquo;
@@ -3201,7 +3211,7 @@
 Convenient abstraction for working with this state, which is as simple as using a local variable
 Multi-threaded, parallel execution engine. ProcessFunction code runs in a single thread, without the need for synchronization. Flink handles all the parallel execution aspects and correct access to the shared state, without you, as a developer, having to think about it (concurrency is hard).
 All these aspects make it possible to build applications with Flink that go well beyond trivial streaming ETL use cases and enable implementation of arbitrarily-sophisticated, distributed event-driven applications. With Flink, you can rethink approaches to a wide range of use cases which normally would rely on using stateless parallel execution nodes and &ldquo;pushing&rdquo; the concerns of state fault tolerance to a database, an approach that is often destined to run into scalability issues in the face of ever-increasing data volumes.
-`}),e.add({id:169,href:"/2020/07/29/flink-community-update-july20/",title:"Flink Community Update - July'20",section:"Flink Blog",content:`As July draws to an end, we look back at a monthful of activity in the Flink community, including two releases (!) and some work around improving the first-time contribution experience in the project.
+`}),e.add({id:170,href:"/2020/07/29/flink-community-update-july20/",title:"Flink Community Update - July'20",section:"Flink Blog",content:`As July draws to an end, we look back at a monthful of activity in the Flink community, including two releases (!) and some work around improving the first-time contribution experience in the project.
 Also, events are starting to pick up again, so we&rsquo;ve put together a list of some great ones you can (virtually) attend in August!
 The Past Month in Flink # Flink Releases # Flink 1.11 # A couple of weeks ago, Flink 1.11 was announced in what was (again) the biggest Flink release to date (see &ldquo;A Look Into the Evolution of Flink Releases&rdquo;)! The new release brought significant improvements to usability as well as new features to Flink users across the API stack. Some highlights of Flink 1.11 are:
 Unaligned checkpoints to cope with high backpressure scenarios;
@@ -3232,7 +3242,7 @@
 Beam Summit (Aug. 24-29) Streaming, Fast and Slow
 Building Stateful Streaming Pipelines With Beam
 Blogposts Flink 1.11 Series Application Deployment in Flink: Current State and the new Application Mode Sharing is caring - Catalogs in Flink SQL (Tutorial) Flink SQL Demo: Building an End-to-End Streaming Application (Tutorial) Other Streaming analytics with Java and Apache Flink (Tutorial) Flink for online Machine Learning and real-time processing at Weibo Data-driven Matchmaking at Azar with Apache Flink Flink Packages Flink Packages is a website where you can explore (and contribute to) the Flink ecosystem of connectors, extensions, APIs, tools and integrations. New in: SignalFx Metrics Reporter Yauaa: Yet Another UserAgent Analyzer If you’d like to keep a closer eye on what’s happening in the community, subscribe to the Flink @community mailing list to get fine-grained weekly updates, upcoming event announcements and more.
-`}),e.add({id:170,href:"/2020/07/28/flink-sql-demo-building-an-end-to-end-streaming-application/",title:"Flink SQL Demo: Building an End-to-End Streaming Application",section:"Flink Blog",content:`Apache Flink 1.11 has released many exciting new features, including many developments in Flink SQL which is evolving at a fast pace. This article takes a closer look at how to quickly build streaming applications with Flink SQL from a practical point of view.
+`}),e.add({id:171,href:"/2020/07/28/flink-sql-demo-building-an-end-to-end-streaming-application/",title:"Flink SQL Demo: Building an End-to-End Streaming Application",section:"Flink Blog",content:`Apache Flink 1.11 has released many exciting new features, including many developments in Flink SQL which is evolving at a fast pace. This article takes a closer look at how to quickly build streaming applications with Flink SQL from a practical point of view.
 In the following sections, we describe how to integrate Kafka, MySQL, Elasticsearch, and Kibana with Flink SQL to analyze e-commerce user behavior in real-time. All exercises in this blogpost are performed in the Flink SQL CLI, and the entire process uses standard SQL syntax, without a single line of Java/Scala code or IDE installation. The final result of this demo is shown in the following figure:
 Preparation # Prepare a Linux or MacOS computer with Docker installed.
 Starting the Demo Environment # The components required in this demo are all managed in containers, so we will use docker-compose to start them. First, download the docker-compose.yml file that defines the demo environment, for example by running the following commands:
@@ -3273,13 +3283,13 @@
 As illustrated in the diagram, the categories of clothing and shoes exceed by far other categories on the e-commerce website.
 We have now implemented three practical applications and created charts for them. We can now return to the dashboard page and drag-and-drop each view to give our dashboard a more formal and intuitive style, as illustrated in the beginning of the blogpost. Of course, Kibana also provides a rich set of graphics and visualization features, and the user_behavior logs contain a lot more interesting information to explore. Using Flink SQL, you can analyze data in more dimensions, while using Kibana allows you to display more views and observe real-time changes in its charts!
 Summary # In the previous sections, we described how to use Flink SQL to integrate Kafka, MySQL, Elasticsearch, and Kibana to quickly build a real-time analytics application. The entire process can be completed using standard SQL syntax, without a line of Java or Scala code. We hope that this article provides some clear and practical examples of the convenience and power of Flink SQL, featuring an easy connection to various external systems, native support for event time and out-of-order handling, dimension table joins and a wide range of built-in functions. We hope you have fun following the examples in this blogpost!
-`}),e.add({id:171,href:"/2020/07/23/sharing-is-caring-catalogs-in-flink-sql/",title:"Sharing is caring - Catalogs in Flink SQL",section:"Flink Blog",content:"With an ever-growing number of people working with data, it&rsquo;s a common practice for companies to build self-service platforms with the goal of democratizing their access across different teams and — especially — to enable users from any background to be independent in their data needs. In such environments, metadata management becomes a crucial aspect. Without it, users often work blindly, spending too much time searching for datasets and their location, figuring out data formats and similar cumbersome tasks.\nIn this blog post, we want to give you a high level overview of catalogs in Flink. We&rsquo;ll describe why you should consider using them and what you can achieve with one in place. To round it up, we&rsquo;ll also showcase how simple it is to combine catalogs and Flink, in the form of an end-to-end example that you can try out yourself.\nWhy do I need a catalog? # Frequently, companies start building a data platform with a metastore, catalog, or schema registry of some sort already in place. Those let you clearly separate making the data available from consuming it. That separation has a few benefits:\nImproved productivity - The most obvious one. Making data reusable and shifting the focus on building new models/pipelines rather than data cleansing and discovery. Security - You can control the access to certain features of the data. For example, you can make the schema of the dataset publicly available, but limit the actual access to the underlying data only to particular teams. Compliance - If you have all the metadata in a central entity, it&rsquo;s much easier to ensure compliance with GDPR and similar regulations and legal requirements. What is stored in a catalog? # Almost all data sets can be described by certain properties that must be known in order to consume them. 
Those include:\nSchema - It describes the actual contents of the data, what columns it has, what are the constraints (e.g. keys) on which the updates should be performed, which fields can act as time attributes, what are the rules for watermark generation and so on.\nLocation - Does the data come from Kafka or a file in a filesystem? How do you connect to the external system? Which topic or file name do you use?\nFormat - Is the data serialized as JSON, CSV, or maybe Avro records?\nStatistics - You can also store additional information that can be useful when creating an execution plan of your query. For example, you can choose the best join algorithm, based on the number of rows in joined datasets.\nCatalogs don’t have to be limited to the metadata of datasets. You can usually store other objects that can be reused in different scenarios, such as:\nFunctions - It&rsquo;s very common to have domain specific functions that can be helpful in different use cases. Instead of having to create them in each place separately, you can just create them once and share them with others.\nQueries - Those can be useful when you don’t want to persist a data set, but want to provide a recipe for creating it from other sources instead.\nCatalogs support in Flink SQL # Starting from version 1.9, Flink has a set of Catalog APIs that allows to integrate Flink with various catalog implementations. With the help of those APIs, you can query tables in Flink that were created in your external catalogs (e.g. Hive Metastore). Additionally, depending on the catalog implementation, you can create new objects such as tables or views from Flink, reuse them across different jobs, and possibly even use them in other tools compatible with that catalog. 
In other words, you can see catalogs as having a two-fold purpose:\nProvide an out-of-the box integration with ecosystems such as RDBMSs or Hive that allows you to query external objects like tables, views, or functions with no additional connector configuration. The connector properties are automatically derived from the catalog itself.\nAct as a persistent store for Flink-specific metadata. In this mode, we additionally store connector properties alongside the logical metadata (e.g. schema, object name). That approach enables you to, for example, store a full definition of a Kafka-backed table with records serialized with Avro in Hive that can be later on used by Flink. However, as it incorporates Flink-specific properties, it can not be used by other tools that leverage Hive Metastore.\nAs of Flink 1.11, there are two catalog implementations supported by the community:\nA comprehensive Hive catalog\nA Postgres catalog (preview, read-only, for now)\nNote Flink does not store data at rest; it is a compute engine and requires other systems to consume input from and write its output. This means that Flink does not own the lifecycle of the data. Integration with Catalogs does not change that. Flink uses catalogs for metadata management only. All you need to do to start querying your tables defined in either of these metastores is to create the corresponding catalogs with connection parameters. 
Once this is done, you can use them the way you would in any relational database management system.\n-- create a catalog which gives access to the backing Postgres installation CREATE CATALOG postgres WITH ( &#39;type&#39;=&#39;jdbc&#39;, &#39;property-version&#39;=&#39;1&#39;, &#39;base-url&#39;=&#39;jdbc:postgresql://postgres:5432/&#39;, &#39;default-database&#39;=&#39;postgres&#39;, &#39;username&#39;=&#39;postgres&#39;, &#39;password&#39;=&#39;example&#39; ); -- create a catalog which gives access to the backing Hive installation CREATE CATALOG hive WITH ( &#39;type&#39;=&#39;hive&#39;, &#39;property-version&#39;=&#39;1&#39;, &#39;hive-version&#39;=&#39;2.3.6&#39;, &#39;hive-conf-dir&#39;=&#39;/opt/hive-conf&#39; ); After creating the catalogs, you can confirm that they are available to Flink and also list the databases or tables in each of these catalogs:\n&gt; show catalogs; default_catalog hive postgres -- switch the default catalog to Hive &gt; use catalog hive; &gt; show databases; default -- hive&#39;s default database &gt; show tables; dev_orders &gt; use catalog postgres; &gt; show tables; prod_customer prod_nation prod_rates prod_region region_stats -- desribe a schema of a table in Postgres, the Postgres types are automatically mapped to -- Flink&#39;s type system &gt; describe prod_customer root |-- c_custkey: INT NOT NULL |-- c_name: VARCHAR(25) NOT NULL |-- c_address: VARCHAR(40) NOT NULL |-- c_nationkey: INT NOT NULL |-- c_phone: CHAR(15) NOT NULL |-- c_acctbal: DOUBLE NOT NULL |-- c_mktsegment: CHAR(10) NOT NULL |-- c_comment: VARCHAR(117) NOT NULL Now that you know which tables are available, you can write your first query. In this scenario, we keep customer orders in Hive (dev_orders) because of their volume, and reference customer data in Postgres (prod_customer) to be able to easily update it. 
Let’s write a query that shows customers and their orders by region and order priority for a specific day.\nUSE CATALOG postgres; SELECT r_name AS `region`, o_orderpriority AS `priority`, COUNT(DISTINCT c_custkey) AS `number_of_customers`, COUNT(o_orderkey) AS `number_of_orders` FROM `hive`.`default`.dev_orders -- we need to fully qualify the table in hive because we set the -- current catalog to Postgres JOIN prod_customer ON o_custkey = c_custkey JOIN prod_nation ON c_nationkey = n_nationkey JOIN prod_region ON n_regionkey = r_regionkey WHERE FLOOR(o_ordertime TO DAY) = TIMESTAMP &#39;2020-04-01 0:00:00.000&#39; AND NOT o_orderpriority = &#39;4-NOT SPECIFIED&#39; GROUP BY r_name, o_orderpriority ORDER BY r_name, o_orderpriority; Flink&rsquo;s catalog support also covers storing Flink-specific objects in external catalogs that might not be fully usable by the corresponding external tools. The most notable use case for this is, for example, storing a table that describes a Kafka topic in a Hive catalog. Take the following DDL statement, that contains a watermark declaration as well as a set of connector properties that are not recognizable by Hive. 
You won&rsquo;t be able to query the table with Hive, but it will be persisted and can be reused by different Flink jobs.\nUSE CATALOG hive; CREATE TABLE prod_lineitem ( l_orderkey INTEGER, l_partkey INTEGER, l_suppkey INTEGER, l_linenumber INTEGER, l_quantity DOUBLE, l_extendedprice DOUBLE, l_discount DOUBLE, l_tax DOUBLE, l_currency STRING, l_returnflag STRING, l_linestatus STRING, l_ordertime TIMESTAMP(3), l_shipinstruct STRING, l_shipmode STRING, l_comment STRING, l_proctime AS PROCTIME(), WATERMARK FOR l_ordertime AS l_ordertime - INTERVAL &#39;5&#39; SECONDS ) WITH ( &#39;connector&#39;=&#39;kafka&#39;, &#39;topic&#39;=&#39;lineitem&#39;, &#39;scan.startup.mode&#39;=&#39;earliest-offset&#39;, &#39;properties.bootstrap.servers&#39;=&#39;kafka:9092&#39;, &#39;properties.group.id&#39;=&#39;testGroup&#39;, &#39;format&#39;=&#39;csv&#39;, &#39;csv.field-delimiter&#39;=&#39;|&#39; ); With prod_lineitem stored in Hive, you can now write a query that will enrich the incoming stream with static data kept in Postgres. To illustrate how this works, let&rsquo;s calculate the item prices based on the current currency rates:\nUSE CATALOG postgres; SELECT l_proctime AS `querytime`, l_orderkey AS `order`, l_linenumber AS `linenumber`, l_currency AS `currency`, rs_rate AS `cur_rate`, (l_extendedprice * (1 - l_discount) * (1 + l_tax)) / rs_rate AS `open_in_euro` FROM hive.`default`.prod_lineitem JOIN prod_rates FOR SYSTEM_TIME AS OF l_proctime ON rs_symbol = l_currency WHERE l_linestatus = &#39;O&#39;; The query above uses a SYSTEM AS OF clause for executing a temporal join. If you&rsquo;d like to learn more about the different kind of joins you can do in Flink I highly encourage you to check this documentation page.\nConclusion # Catalogs can be extremely powerful when building data platforms aimed at reusing the work of different teams in an organization. 
Centralizing the metadata is a common practice for improving productivity, security, and compliance when working with data.\nFlink provides flexible metadata management capabilities, that aim at reducing the cumbersome, repetitive work needed before querying the data such as defining schemas, connection properties etc. As of version 1.11, Flink provides a native, comprehensive integration with Hive Metastore and a read-only version for Postgres catalogs.\nYou can get started with Flink and catalogs by reading the docs. If you want to play around with Flink SQL (e.g. try out how catalogs work in Flink yourself), you can check this demo prepared by our colleagues Fabian and Timo — it runs in a dockerized environment, and we used it for the examples in this blog post.\n"}),e.add({id:172,href:"/2020/07/21/apache-flink-1.11.1-released/",title:"Apache Flink 1.11.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix version of the Apache Flink 1.11 series.
+`}),e.add({id:172,href:"/2020/07/23/sharing-is-caring-catalogs-in-flink-sql/",title:"Sharing is caring - Catalogs in Flink SQL",section:"Flink Blog",content:"With an ever-growing number of people working with data, it&rsquo;s a common practice for companies to build self-service platforms with the goal of democratizing their access across different teams and — especially — to enable users from any background to be independent in their data needs. In such environments, metadata management becomes a crucial aspect. Without it, users often work blindly, spending too much time searching for datasets and their location, figuring out data formats and similar cumbersome tasks.\nIn this blog post, we want to give you a high level overview of catalogs in Flink. We&rsquo;ll describe why you should consider using them and what you can achieve with one in place. To round it up, we&rsquo;ll also showcase how simple it is to combine catalogs and Flink, in the form of an end-to-end example that you can try out yourself.\nWhy do I need a catalog? # Frequently, companies start building a data platform with a metastore, catalog, or schema registry of some sort already in place. Those let you clearly separate making the data available from consuming it. That separation has a few benefits:\nImproved productivity - The most obvious one. Making data reusable and shifting the focus on building new models/pipelines rather than data cleansing and discovery. Security - You can control the access to certain features of the data. For example, you can make the schema of the dataset publicly available, but limit the actual access to the underlying data only to particular teams. Compliance - If you have all the metadata in a central entity, it&rsquo;s much easier to ensure compliance with GDPR and similar regulations and legal requirements. What is stored in a catalog? # Almost all data sets can be described by certain properties that must be known in order to consume them. 
Those include:\nSchema - It describes the actual contents of the data, what columns it has, what are the constraints (e.g. keys) on which the updates should be performed, which fields can act as time attributes, what are the rules for watermark generation and so on.\nLocation - Does the data come from Kafka or a file in a filesystem? How do you connect to the external system? Which topic or file name do you use?\nFormat - Is the data serialized as JSON, CSV, or maybe Avro records?\nStatistics - You can also store additional information that can be useful when creating an execution plan of your query. For example, you can choose the best join algorithm, based on the number of rows in joined datasets.\nCatalogs don’t have to be limited to the metadata of datasets. You can usually store other objects that can be reused in different scenarios, such as:\nFunctions - It&rsquo;s very common to have domain specific functions that can be helpful in different use cases. Instead of having to create them in each place separately, you can just create them once and share them with others.\nQueries - Those can be useful when you don’t want to persist a data set, but want to provide a recipe for creating it from other sources instead.\nCatalogs support in Flink SQL # Starting from version 1.9, Flink has a set of Catalog APIs that allows to integrate Flink with various catalog implementations. With the help of those APIs, you can query tables in Flink that were created in your external catalogs (e.g. Hive Metastore). Additionally, depending on the catalog implementation, you can create new objects such as tables or views from Flink, reuse them across different jobs, and possibly even use them in other tools compatible with that catalog. 
In other words, you can see catalogs as having a two-fold purpose:\nProvide an out-of-the box integration with ecosystems such as RDBMSs or Hive that allows you to query external objects like tables, views, or functions with no additional connector configuration. The connector properties are automatically derived from the catalog itself.\nAct as a persistent store for Flink-specific metadata. In this mode, we additionally store connector properties alongside the logical metadata (e.g. schema, object name). That approach enables you to, for example, store a full definition of a Kafka-backed table with records serialized with Avro in Hive that can be later on used by Flink. However, as it incorporates Flink-specific properties, it can not be used by other tools that leverage Hive Metastore.\nAs of Flink 1.11, there are two catalog implementations supported by the community:\nA comprehensive Hive catalog\nA Postgres catalog (preview, read-only, for now)\nNote Flink does not store data at rest; it is a compute engine and requires other systems to consume input from and write its output. This means that Flink does not own the lifecycle of the data. Integration with Catalogs does not change that. Flink uses catalogs for metadata management only. All you need to do to start querying your tables defined in either of these metastores is to create the corresponding catalogs with connection parameters. 
Once this is done, you can use them the way you would in any relational database management system.\n-- create a catalog which gives access to the backing Postgres installation CREATE CATALOG postgres WITH ( &#39;type&#39;=&#39;jdbc&#39;, &#39;property-version&#39;=&#39;1&#39;, &#39;base-url&#39;=&#39;jdbc:postgresql://postgres:5432/&#39;, &#39;default-database&#39;=&#39;postgres&#39;, &#39;username&#39;=&#39;postgres&#39;, &#39;password&#39;=&#39;example&#39; ); -- create a catalog which gives access to the backing Hive installation CREATE CATALOG hive WITH ( &#39;type&#39;=&#39;hive&#39;, &#39;property-version&#39;=&#39;1&#39;, &#39;hive-version&#39;=&#39;2.3.6&#39;, &#39;hive-conf-dir&#39;=&#39;/opt/hive-conf&#39; ); After creating the catalogs, you can confirm that they are available to Flink and also list the databases or tables in each of these catalogs:\n&gt; show catalogs; default_catalog hive postgres -- switch the default catalog to Hive &gt; use catalog hive; &gt; show databases; default -- hive&#39;s default database &gt; show tables; dev_orders &gt; use catalog postgres; &gt; show tables; prod_customer prod_nation prod_rates prod_region region_stats -- desribe a schema of a table in Postgres, the Postgres types are automatically mapped to -- Flink&#39;s type system &gt; describe prod_customer root |-- c_custkey: INT NOT NULL |-- c_name: VARCHAR(25) NOT NULL |-- c_address: VARCHAR(40) NOT NULL |-- c_nationkey: INT NOT NULL |-- c_phone: CHAR(15) NOT NULL |-- c_acctbal: DOUBLE NOT NULL |-- c_mktsegment: CHAR(10) NOT NULL |-- c_comment: VARCHAR(117) NOT NULL Now that you know which tables are available, you can write your first query. In this scenario, we keep customer orders in Hive (dev_orders) because of their volume, and reference customer data in Postgres (prod_customer) to be able to easily update it. 
Let’s write a query that shows customers and their orders by region and order priority for a specific day.\nUSE CATALOG postgres; SELECT r_name AS `region`, o_orderpriority AS `priority`, COUNT(DISTINCT c_custkey) AS `number_of_customers`, COUNT(o_orderkey) AS `number_of_orders` FROM `hive`.`default`.dev_orders -- we need to fully qualify the table in hive because we set the -- current catalog to Postgres JOIN prod_customer ON o_custkey = c_custkey JOIN prod_nation ON c_nationkey = n_nationkey JOIN prod_region ON n_regionkey = r_regionkey WHERE FLOOR(o_ordertime TO DAY) = TIMESTAMP &#39;2020-04-01 0:00:00.000&#39; AND NOT o_orderpriority = &#39;4-NOT SPECIFIED&#39; GROUP BY r_name, o_orderpriority ORDER BY r_name, o_orderpriority; Flink&rsquo;s catalog support also covers storing Flink-specific objects in external catalogs that might not be fully usable by the corresponding external tools. The most notable use case for this is, for example, storing a table that describes a Kafka topic in a Hive catalog. Take the following DDL statement, that contains a watermark declaration as well as a set of connector properties that are not recognizable by Hive. 
You won&rsquo;t be able to query the table with Hive, but it will be persisted and can be reused by different Flink jobs.\nUSE CATALOG hive; CREATE TABLE prod_lineitem ( l_orderkey INTEGER, l_partkey INTEGER, l_suppkey INTEGER, l_linenumber INTEGER, l_quantity DOUBLE, l_extendedprice DOUBLE, l_discount DOUBLE, l_tax DOUBLE, l_currency STRING, l_returnflag STRING, l_linestatus STRING, l_ordertime TIMESTAMP(3), l_shipinstruct STRING, l_shipmode STRING, l_comment STRING, l_proctime AS PROCTIME(), WATERMARK FOR l_ordertime AS l_ordertime - INTERVAL &#39;5&#39; SECONDS ) WITH ( &#39;connector&#39;=&#39;kafka&#39;, &#39;topic&#39;=&#39;lineitem&#39;, &#39;scan.startup.mode&#39;=&#39;earliest-offset&#39;, &#39;properties.bootstrap.servers&#39;=&#39;kafka:9092&#39;, &#39;properties.group.id&#39;=&#39;testGroup&#39;, &#39;format&#39;=&#39;csv&#39;, &#39;csv.field-delimiter&#39;=&#39;|&#39; ); With prod_lineitem stored in Hive, you can now write a query that will enrich the incoming stream with static data kept in Postgres. To illustrate how this works, let&rsquo;s calculate the item prices based on the current currency rates:\nUSE CATALOG postgres; SELECT l_proctime AS `querytime`, l_orderkey AS `order`, l_linenumber AS `linenumber`, l_currency AS `currency`, rs_rate AS `cur_rate`, (l_extendedprice * (1 - l_discount) * (1 + l_tax)) / rs_rate AS `open_in_euro` FROM hive.`default`.prod_lineitem JOIN prod_rates FOR SYSTEM_TIME AS OF l_proctime ON rs_symbol = l_currency WHERE l_linestatus = &#39;O&#39;; The query above uses a SYSTEM AS OF clause for executing a temporal join. If you&rsquo;d like to learn more about the different kind of joins you can do in Flink I highly encourage you to check this documentation page.\nConclusion # Catalogs can be extremely powerful when building data platforms aimed at reusing the work of different teams in an organization. 
Centralizing the metadata is a common practice for improving productivity, security, and compliance when working with data.\nFlink provides flexible metadata management capabilities, that aim at reducing the cumbersome, repetitive work needed before querying the data such as defining schemas, connection properties etc. As of version 1.11, Flink provides a native, comprehensive integration with Hive Metastore and a read-only version for Postgres catalogs.\nYou can get started with Flink and catalogs by reading the docs. If you want to play around with Flink SQL (e.g. try out how catalogs work in Flink yourself), you can check this demo prepared by our colleagues Fabian and Timo — it runs in a dockerized environment, and we used it for the examples in this blog post.\n"}),e.add({id:173,href:"/2020/07/21/apache-flink-1.11.1-released/",title:"Apache Flink 1.11.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix version of the Apache Flink 1.11 series.
 This release includes 44 fixes and minor improvements for Flink 1.11.0. The list below includes a detailed list of all fixes and improvements.
 We highly recommend all users to upgrade to Flink 1.11.1.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.11.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.11.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.11.1&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-15794] - Rethink default value of kubernetes.container.image [FLINK-18324] - Translate updated data type and function page into Chinese [FLINK-18387] - Translate &quot;BlackHole SQL Connector&quot; page into Chinese [FLINK-18388] - Translate &quot;CSV Format&quot; page into Chinese [FLINK-18391] - Translate &quot;Avro Format&quot; page into Chinese [FLINK-18395] - Translate &quot;ORC Format&quot; page into Chinese [FLINK-18469] - Add Application Mode to release notes. [FLINK-18524] - Scala varargs cause exception for new inference Bug [FLINK-15414] - KafkaITCase#prepare failed in travis [FLINK-16181] - IfCallGen will throw NPE for primitive types in blink [FLINK-16572] - CheckPubSubEmulatorTest is flaky on Azure [FLINK-17543] - Rerunning failed azure jobs fails when uploading logs [FLINK-17636] - SingleInputGateTest.testConcurrentReadStateAndProcessAndClose: Trying to read from released RecoveredInputChannel [FLINK-18097] - History server doesn&#39;t clean all job json files [FLINK-18419] - Can not create a catalog from user jar [FLINK-18434] - Can not select fields with JdbcCatalog [FLINK-18440] - ROW_NUMBER function: ROW/RANGE not allowed with RANK, DENSE_RANK or ROW_NUMBER functions [FLINK-18461] - Changelog source can&#39;t be insert into upsert sink [FLINK-18470] - Tests RocksKeyGroupsRocksSingleStateIteratorTest#testMergeIteratorByte &amp; RocksKeyGroupsRocksSingleStateIteratorTest#testMergeIteratorShort fail locally [FLINK-18471] - flink-runtime lists &quot;org.uncommons.maths:uncommons-maths:1.2.2a&quot; as a bundled dependency, but it isn&#39;t [FLINK-18477] - ChangelogSocketExample does not work [FLINK-18478] - AvroDeserializationSchema does not work with types generated by avrohugger [FLINK-18485] - Kerberized YARN per-job on Docker test failed during unzip jce_policy-8.zip [FLINK-18519] - Propagate exception to client when execution fails for REST submission [FLINK-18520] - New Table Function type inference fails [FLINK-18529] - Query 
Hive table and filter by timestamp partition can fail [FLINK-18539] - StreamExecutionEnvironment#addSource(SourceFunction, TypeInformation) doesn&#39;t use the user defined type information [FLINK-18573] - InfluxDB reporter cannot be loaded as plugin [FLINK-18583] - The _id field is incorrectly set to index in Elasticsearch6 DynamicTableSink [FLINK-18585] - Dynamic index can not work in new DynamicTableSink [FLINK-18591] - Fix the format issue for metrics web page Improvement [FLINK-18186] - Various updates on Kubernetes standalone document [FLINK-18422] - Update Prefer tag in documentation &#39;Fault Tolerance training lesson&#39; [FLINK-18457] - Fix invalid links in &quot;Detecting Patterns&quot; page of &quot;Streaming Concepts&quot; [FLINK-18472] - Local Installation Getting Started Guide [FLINK-18484] - RowSerializer arity error does not provide specific information about the mismatch [FLINK-18501] - Mapping of Pluggable Filesystems to scheme is not properly logged [FLINK-18526] - Add the configuration of Python UDF using Managed Memory in the doc of Pyflink [FLINK-18532] - Remove Beta tag from MATCH_RECOGNIZE docs [FLINK-18561] - Build manylinux1 with better compatibility instead of manylinux2014 Python Wheel Packages [FLINK-18593] - Hive bundle jar URLs are broken Test [FLINK-18534] - KafkaTableITCase.testKafkaDebeziumChangelogSource failed with &quot;Topic &#39;changelog_topic&#39; already exists&quot; Task [FLINK-18502] - Add the page &#39;legacySourceSinks.zh.md&#39; into the directory &#39;docs/dev/table&#39; [FLINK-18505] - Correct the content of &#39;sourceSinks.zh.md&#39; `}),e.add({id:173,href:"/2020/07/14/application-deployment-in-flink-current-state-and-the-new-application-mode/",title:"Application Deployment in Flink: Current State and the new Application Mode",section:"Flink Blog",content:`With the rise of stream processing and real-time analytics as a critical tool for modern businesses, an increasing number of organizations build platforms with 
Apache Flink at their core and offer it internally as a service. Many talks with related topics from companies like Uber, Netflix and Alibaba in the latest editions of Flink Forward further illustrate this trend.
+Sub-task [FLINK-15794] - Rethink default value of kubernetes.container.image [FLINK-18324] - Translate updated data type and function page into Chinese [FLINK-18387] - Translate &quot;BlackHole SQL Connector&quot; page into Chinese [FLINK-18388] - Translate &quot;CSV Format&quot; page into Chinese [FLINK-18391] - Translate &quot;Avro Format&quot; page into Chinese [FLINK-18395] - Translate &quot;ORC Format&quot; page into Chinese [FLINK-18469] - Add Application Mode to release notes. [FLINK-18524] - Scala varargs cause exception for new inference Bug [FLINK-15414] - KafkaITCase#prepare failed in travis [FLINK-16181] - IfCallGen will throw NPE for primitive types in blink [FLINK-16572] - CheckPubSubEmulatorTest is flaky on Azure [FLINK-17543] - Rerunning failed azure jobs fails when uploading logs [FLINK-17636] - SingleInputGateTest.testConcurrentReadStateAndProcessAndClose: Trying to read from released RecoveredInputChannel [FLINK-18097] - History server doesn&#39;t clean all job json files [FLINK-18419] - Can not create a catalog from user jar [FLINK-18434] - Can not select fields with JdbcCatalog [FLINK-18440] - ROW_NUMBER function: ROW/RANGE not allowed with RANK, DENSE_RANK or ROW_NUMBER functions [FLINK-18461] - Changelog source can&#39;t be insert into upsert sink [FLINK-18470] - Tests RocksKeyGroupsRocksSingleStateIteratorTest#testMergeIteratorByte &amp; RocksKeyGroupsRocksSingleStateIteratorTest#testMergeIteratorShort fail locally [FLINK-18471] - flink-runtime lists &quot;org.uncommons.maths:uncommons-maths:1.2.2a&quot; as a bundled dependency, but it isn&#39;t [FLINK-18477] - ChangelogSocketExample does not work [FLINK-18478] - AvroDeserializationSchema does not work with types generated by avrohugger [FLINK-18485] - Kerberized YARN per-job on Docker test failed during unzip jce_policy-8.zip [FLINK-18519] - Propagate exception to client when execution fails for REST submission [FLINK-18520] - New Table Function type inference fails [FLINK-18529] - Query 
Hive table and filter by timestamp partition can fail [FLINK-18539] - StreamExecutionEnvironment#addSource(SourceFunction, TypeInformation) doesn&#39;t use the user defined type information [FLINK-18573] - InfluxDB reporter cannot be loaded as plugin [FLINK-18583] - The _id field is incorrectly set to index in Elasticsearch6 DynamicTableSink [FLINK-18585] - Dynamic index can not work in new DynamicTableSink [FLINK-18591] - Fix the format issue for metrics web page Improvement [FLINK-18186] - Various updates on Kubernetes standalone document [FLINK-18422] - Update Prefer tag in documentation &#39;Fault Tolerance training lesson&#39; [FLINK-18457] - Fix invalid links in &quot;Detecting Patterns&quot; page of &quot;Streaming Concepts&quot; [FLINK-18472] - Local Installation Getting Started Guide [FLINK-18484] - RowSerializer arity error does not provide specific information about the mismatch [FLINK-18501] - Mapping of Pluggable Filesystems to scheme is not properly logged [FLINK-18526] - Add the configuration of Python UDF using Managed Memory in the doc of Pyflink [FLINK-18532] - Remove Beta tag from MATCH_RECOGNIZE docs [FLINK-18561] - Build manylinux1 with better compatibility instead of manylinux2014 Python Wheel Packages [FLINK-18593] - Hive bundle jar URLs are broken Test [FLINK-18534] - KafkaTableITCase.testKafkaDebeziumChangelogSource failed with &quot;Topic &#39;changelog_topic&#39; already exists&quot; Task [FLINK-18502] - Add the page &#39;legacySourceSinks.zh.md&#39; into the directory &#39;docs/dev/table&#39; [FLINK-18505] - Correct the content of &#39;sourceSinks.zh.md&#39; `}),e.add({id:174,href:"/2020/07/14/application-deployment-in-flink-current-state-and-the-new-application-mode/",title:"Application Deployment in Flink: Current State and the new Application Mode",section:"Flink Blog",content:`With the rise of stream processing and real-time analytics as a critical tool for modern businesses, an increasing number of organizations build platforms with 
Apache Flink at their core and offer it internally as a service. Many talks with related topics from companies like Uber, Netflix and Alibaba in the latest editions of Flink Forward further illustrate this trend.
 These platforms aim at simplifying application submission internally by lifting all the operational burden from the end user. To submit Flink applications, these platforms usually expose only a centralized or low-parallelism endpoint (e.g. a Web frontend) for application submission that we will call the Deployer.
 One of the roadblocks that platform developers and maintainers often mention is that the Deployer can be a heavy resource consumer that is difficult to provision for. Provisioning for average load can lead to the Deployer service being overwhelmed with deployment requests (in the worst case, for all production applications in a short period of time), while planning based on top load leads to unnecessary costs. Building on this observation, Flink 1.11 introduces the Application Mode as a deployment option, which allows for a lightweight, more scalable application submission process that manages to spread more evenly the application deployment load across the nodes in the cluster.
 In order to understand the problem and how the Application Mode solves it, we start by describing briefly the current status of application execution in Flink, before describing the architectural changes introduced by the new deployment mode and how to leverage them.
@@ -3318,7 +3328,7 @@
 Conclusion # We hope that this discussion helped you understand the differences between the various deployment modes offered by Flink and will help you to make informed decisions about which one is suitable in your own setup. Feel free to play around with them and report any issues you may find. If you have any questions or requests, do not hesitate to post them in the mailing lists and, hopefully, see you (virtually) at one of our conferences or meetups soon!
 The only exceptions are the Web Submission and the Standalone per-job implementation.&#160;&#x21a9;&#xfe0e;
 Support for Kubernetes will come soon.&#160;&#x21a9;&#xfe0e;
-`}),e.add({id:174,href:"/2020/07/06/apache-flink-1.11.0-release-announcement/",title:"Apache Flink 1.11.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is proud to announce the release of Flink 1.11.0! More than 200 contributors worked on over 1.3k issues to bring significant improvements to usability as well as new features to Flink users across the whole API stack. Some highlights that we&rsquo;re particularly excited about are:
+`}),e.add({id:175,href:"/2020/07/06/apache-flink-1.11.0-release-announcement/",title:"Apache Flink 1.11.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is proud to announce the release of Flink 1.11.0! More than 200 contributors worked on over 1.3k issues to bring significant improvements to usability as well as new features to Flink users across the whole API stack. Some highlights that we&rsquo;re particularly excited about are:
 The core engine is introducing unaligned checkpoints, a major change to Flink&rsquo;s fault tolerance mechanism that improves checkpointing performance under heavy backpressure.
 A new Source API that simplifies the implementation of (custom) sources by unifying batch and streaming execution, as well as offloading internals such as event-time handling, watermark generation or idleness detection to Flink.
 Flink SQL is introducing Support for Change Data Capture (CDC) to easily consume and interpret database changelogs from tools like Debezium. The renewed FileSystem Connector also expands the set of use cases and formats supported in the Table API/SQL, enabling scenarios like streaming data directly from Kafka to Hive.
@@ -3405,7 +3415,7 @@
 Release Notes # Please review the release notes carefully for a detailed list of changes and new features if you plan to upgrade your setup to Flink 1.11. This version is API-compatible with previous 1.x releases for APIs annotated with the @Public annotation.
 List of Contributors # The Apache Flink community would like to thank all the 200+ contributors that have made this release possible:
 Aitozi, Alexander Fedulov, Alexey Trenikhin, Aljoscha Krettek, Andrey Zagrebin, Arvid Heise, Ayush Saxena, Bairos, Bartosz Krasinski, Benchao Li, Benoit Hanotte, Benoît Paris, Bhagavan Das, Canbin Zheng, Cedric Chen, Chesnay Schepler, Colm O hEigeartaigh, Congxian Qiu, CrazyTomatoOo, Danish Amjad, Danny Chan, David Anderson, Dawid Wysakowicz, Dian Fu, Dominik Wosiński, Echo Lee, Ethan Marsh, Etienne Chauchot, Fabian Hueske, Fabian Paul, Flavio Pompermaier, Gao Yun, Gary Yao, Ghildiyal, Grebennikov Roman, GuoWei Ma, Guru Prasad, Gyula Fora, Hequn Cheng, Hu Guang, HuFeiHu, HuangXingBo, Igal Shilman, Ismael Juma, Jacob Sevart, Jark Wu, Jaskaran Bindra, Jason K, Jeff Yang, Jeff Zhang, Jerry Wang, Jiangjie (Becket) Qin, Jiayi, Jiayi Liao, Jiayi-Liao, Jincheng Sun, Jing Zhang, Jingsong Lee, JingsongLi, Jun Qin, JunZhang, Jörn Kottmann, Kevin Bohinski, Konstantin Knauf, Kostas Kloudas, Kurt Young, Leonard Xu, Lining Jing, Liupengcheng, LululuAlu, Marta Paes Moreira, Matt Welke, Max Kuklinski, Maximilian Michels, Nico Kruber, Niels Basjes, Oleksandr Nitavskyi, Paul Lam, Paul Lin, PengFei Li, PengchengLiu, Piotr Nowojski, Prem Santosh, Qingsheng Ren, Rafi Aroch, Raymond Farrelly, Richard Deurwaarder, Robert Metzger, RocMarshal, Roey Shem Tov, Roman, Roman Khachatryan, Rong Rong, RoyRuan, Rui Li, Seth Wiesman, Shaobin.Ou, Shengkai, Shuiqiang Chen, Shuo Cheng, Sivaprasanna, Sivaprasanna S, SteNicholas, Stefan Richter, Stephan Ewen, Steve OU, Steve Whelan, Tartarus, Terry Wang, Thomas Weise, Till Rohrmann, Timo Walther, TsReaper, Tzu-Li (Gordon) Tai, Victor Wong, Wei Zhong, Weike DONG, Xiaogang Zhou, Xintong Song, Xu Bai, Xuannan, Yadong Xie, Yang Wang, Yangze Guo, Yichao Yang, Ying, Yu Li, Yuan Mei, Yun Gao, Yun Tang, Yuval Itzchakov, Zakelly, Zhao, Zhenghua Gao, Zhijiang, Zhu Zhu, acqua.csq, austin ce, azagrebin, bdine, bowen.li, caoyingjie, caozhen, caozhen1937, chaojianok, chen, chendonglin, comsir, cpugputpu, czhang2, dianfu, edu05, eduardowt, fangliang, felixzheng, 
fmyblack, gauss, gk0916, godfrey he, godfreyhe, guliziduo, guowei.mgw, hehuiyuan, hequn8128, hpeter, huangxingbo, huzheng, ifndef-SleePy, jingwen-ywb, jrthe42, kevin.cyj, klion26, lamber-ken, leesf, libenchao, lijiewang.wlj, liuyongvs, lsy, lumen, machinedoll, mans2singh, molsionmo, oliveryunchang, openinx, paul8263, ptmagic, qqibrow, sev7e0, shuai-xu, shuai.xu, shuiqiangchen, snuyanzin, spafka, sunhaibotb, sunjincheng121, testfixer, tison, vinoyang, vthinkxie, wangtong, wangxianghu, wangxiyuan, wangxlong, wangyang0918, wenlong.lwl, whlwanghailong, william, windWheel, wooplevip, wuxuyang, xushiwei, xuyang1706, yanghua, yangyichao-mango, yuzhao.cyz, zentol, zhanglibing, zhangmang, zhangzhanchun, zhengcanbin, zhengshuli, zhenxianyimeng, zhijiang, zhongyong jin, zhule, zhuxiaoshang, zjuwangg, zoudan, zoudaokoulife, zzchun, “lzh576177775”, 骚sir, 厉颖, 张军, 曹建华, 漫步云端
-`}),e.add({id:175,href:"/2020/06/23/flink-on-zeppelin-notebooks-for-interactive-data-analysis-part-2/",title:"Flink on Zeppelin Notebooks for Interactive Data Analysis - Part 2",section:"Flink Blog",content:`In a previous post, we introduced the basics of Flink on Zeppelin and how to do Streaming ETL. In this second part of the &ldquo;Flink on Zeppelin&rdquo; series of posts, I will share how to perform streaming data visualization via Flink on Zeppelin and how to use Apache Flink UDFs in Zeppelin.
+`}),e.add({id:176,href:"/2020/06/23/flink-on-zeppelin-notebooks-for-interactive-data-analysis-part-2/",title:"Flink on Zeppelin Notebooks for Interactive Data Analysis - Part 2",section:"Flink Blog",content:`In a previous post, we introduced the basics of Flink on Zeppelin and how to do Streaming ETL. In this second part of the &ldquo;Flink on Zeppelin&rdquo; series of posts, I will share how to perform streaming data visualization via Flink on Zeppelin and how to use Apache Flink UDFs in Zeppelin.
 Streaming Data Visualization # With Zeppelin, you can build a real time streaming dashboard without writing any line of javascript/html/css code.
 Overall, Zeppelin supports 3 kinds of streaming data analytics:
 Single Mode Update Mode Append Mode Single Mode # Single mode is used for cases when the result of a SQL statement is always one row, such as the following example. The output format is translated in HTML, and you can specify a paragraph local property template for the final output content template. And you can use {i} as placeholder for the {i}th column of the result.
@@ -3414,14 +3424,14 @@
 UDF # SQL is a very powerful language, especially in expressing data flow. But most of the time, you need to handle complicated business logic that cannot be expressed by SQL. In these cases UDFs (user-defined functions) come particularly handy. In Zeppelin, you can write Scala or Python UDFs, while you can also import Scala, Python and Java UDFs. Here are 2 examples of Scala and Python UDFs:
 Scala UDF %flink class ScalaUpper extends ScalarFunction { def eval(str: String) = str.toUpperCase } btenv.registerFunction(&#34;scala_upper&#34;, new ScalaUpper()) Python UDF %flink.pyflink class PythonUpper(ScalarFunction): def eval(self, s): return s.upper() bt_env.register_function(&#34;python_upper&#34;, udf(PythonUpper(), DataTypes.STRING(), DataTypes.STRING())) After you define the UDFs, you can use them directly in SQL:
 Use Scala UDF in SQL Use Python UDF in SQL Summary # In this post, we explained how to perform streaming data visualization via Flink on Zeppelin and how to use UDFs. Besides that, you can do more in Zeppelin with Flink, such as batch processing, Hive integration and more. You can check the following articles for more details and here&rsquo;s a list of Flink on Zeppelin tutorial videos for your reference.
-References # Apache Zeppelin official website Flink on Zeppelin tutorials - Part 1 Flink on Zeppelin tutorials - Part 2 Flink on Zeppelin tutorials - Part 3 Flink on Zeppelin tutorials - Part 4 Flink on Zeppelin tutorial videos `}),e.add({id:176,href:"/2020/06/15/flink-on-zeppelin-notebooks-for-interactive-data-analysis-part-1/",title:"Flink on Zeppelin Notebooks for Interactive Data Analysis - Part 1",section:"Flink Blog",content:`The latest release of Apache Zeppelin comes with a redesigned interpreter for Apache Flink (version Flink 1.10+ is only supported moving forward) that allows developers to use Flink directly on Zeppelin notebooks for interactive data analysis. I wrote 2 posts about how to use Flink in Zeppelin. This is part-1 where I explain how the Flink interpreter in Zeppelin works, and provide a tutorial for running Streaming ETL with Flink on Zeppelin.
+References # Apache Zeppelin official website Flink on Zeppelin tutorials - Part 1 Flink on Zeppelin tutorials - Part 2 Flink on Zeppelin tutorials - Part 3 Flink on Zeppelin tutorials - Part 4 Flink on Zeppelin tutorial videos `}),e.add({id:177,href:"/2020/06/15/flink-on-zeppelin-notebooks-for-interactive-data-analysis-part-1/",title:"Flink on Zeppelin Notebooks for Interactive Data Analysis - Part 1",section:"Flink Blog",content:`The latest release of Apache Zeppelin comes with a redesigned interpreter for Apache Flink (version Flink 1.10+ is only supported moving forward) that allows developers to use Flink directly on Zeppelin notebooks for interactive data analysis. I wrote 2 posts about how to use Flink in Zeppelin. This is part-1 where I explain how the Flink interpreter in Zeppelin works, and provide a tutorial for running Streaming ETL with Flink on Zeppelin.
 The Flink Interpreter in Zeppelin 0.9 # The Flink interpreter can be accessed and configured from Zeppelin’s interpreter settings page. The interpreter has been refactored so that Flink users can now take advantage of Zeppelin to write Flink applications in three languages, namely Scala, Python (PyFlink) and SQL (for both batch &amp; streaming executions). Zeppelin 0.9 now comes with the Flink interpreter group, consisting of the below five interpreters:
 %flink - Provides a Scala environment %flink.pyflink - Provides a python environment %flink.ipyflink - Provides an ipython environment %flink.ssql - Provides a stream sql environment %flink.bsql - Provides a batch sql environment Not only has the interpreter been extended to support writing Flink applications in three languages, but it has also extended the available execution modes for Flink that now include:
 Running Flink in Local Mode Running Flink in Remote Mode Running Flink in Yarn Mode You can find more information about how to get started with Zeppelin and all the execution modes for Flink applications in Zeppelin notebooks in this post.
 Flink on Zeppelin for Stream processing # Performing stream processing jobs with Apache Flink on Zeppelin allows you to run most major streaming cases, such as streaming ETL and real time data analytics, with the use of Flink SQL and specific UDFs. Below we showcase how you can execute streaming ETL using Flink on Zeppelin:
 You can use Flink SQL to perform streaming ETL by following the steps below (for the full tutorial, please refer to the Flink Tutorial/Streaming ETL tutorial of the Zeppelin distribution):
 Step 1. Create source table to represent the source data. Step 2. Create a sink table to represent the processed data. Step 3. After creating the source and sink table, we can insert them to our statement to trigger the stream processing job as the following: Step 4. After initiating the streaming job, you can use another SQL statement to query the sink table to verify the results of your job. Here you can see the top 10 records which will be refreshed every 3 seconds. Summary # In this post, we explained how the redesigned Flink interpreter works in Zeppelin 0.9.0 and provided some examples for performing streaming ETL jobs with Flink and Zeppelin. In the next post, I will talk about how to do streaming data visualization via Flink on Zeppelin. Besides that, you can find an additional tutorial for batch processing with Flink on Zeppelin as well as using Flink on Zeppelin for more advance operations like resource isolation, job concurrency &amp; parallelism, multiple Hadoop &amp; Hive environments and more on our series of posts on Medium. And here&rsquo;s a list of Flink on Zeppelin tutorial videos for your reference.
-References # Apache Zeppelin official website Flink on Zeppelin tutorials - Part 1 Flink on Zeppelin tutorials - Part 2 Flink on Zeppelin tutorials - Part 3 Flink on Zeppelin tutorials - Part 4 Flink on Zeppelin tutorial videos `}),e.add({id:177,href:"/2020/06/10/flink-community-update-june20/",title:"Flink Community Update - June'20",section:"Flink Blog",content:`And suddenly it’s June. The previous month has been calm on the surface, but quite hectic underneath — the final testing phase for Flink 1.11 is moving at full speed, Stateful Functions 2.1 is out in the wild and Flink has made it into Google Season of Docs 2020.
+References # Apache Zeppelin official website Flink on Zeppelin tutorials - Part 1 Flink on Zeppelin tutorials - Part 2 Flink on Zeppelin tutorials - Part 3 Flink on Zeppelin tutorials - Part 4 Flink on Zeppelin tutorial videos `}),e.add({id:178,href:"/2020/06/10/flink-community-update-june20/",title:"Flink Community Update - June'20",section:"Flink Blog",content:`And suddenly it’s June. The previous month has been calm on the surface, but quite hectic underneath — the final testing phase for Flink 1.11 is moving at full speed, Stateful Functions 2.1 is out in the wild and Flink has made it into Google Season of Docs 2020.
 To top it off, a piece of good news: Flink Forward is back on October 19-22 as a free virtual event!
 The Past Month in Flink # Flink Stateful Functions 2.1 Release # It might seem like Stateful Functions 2.0 was announced only a handful of weeks ago (and it was!), but the Flink community has just released Stateful Functions 2.1! This release introduces two new features: state expiration for any kind of persisted state and support for UNIX Domain Sockets (UDS) to improve the performance of inter-container communication in co-located deployments; as well as other important changes that improve the overall stability and testability of the project. You can read the announcement blogpost for more details on the release!
 As the community around StateFun grows, the release cycle will follow this pattern of smaller and more frequent releases to incorporate user feedback and allow for faster iteration. If you’d like to get involved, we’re always looking for new contributors — especially around SDKs for other languages (e.g. Go, Rust, Javascript).
@@ -3440,7 +3450,7 @@
 Google Season of Docs 2020 # In the last update, we announced that Flink was applying to Google Season of Docs (GSoD) again this year. The good news: we’ve made it into the shortlist of accepted projects! This represents an invaluable opportunity for the Flink community to collaborate with technical writers to improve the Table API &amp; SQL documentation. We’re honored to have seen a great number of people reach out over the last couple of weeks, and look forward to receiving applications from this week on!
 If you’re interested in learning more about our project idea or want to get involved in GSoD as a technical writer, check out the announcement blogpost and submit your application. The deadline for GSoD applications is July 9th, 18:00 UTC.
 If you’d like to keep a closer eye on what’s happening in the community, subscribe to the Flink @community mailing list to get fine-grained weekly updates, upcoming event announcements and more.
-`}),e.add({id:178,href:"/2020/06/09/stateful-functions-2.1.0-release-announcement/",title:"Stateful Functions 2.1.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is happy to announce the release of Stateful Functions (StateFun) 2.1.0! This release introduces new features around state expiration and performance improvements for co-located deployments, as well as other important changes that improve the stability and testability of the project. As the community around StateFun grows, the release cycle will follow this pattern of smaller and more frequent releases to incorporate user feedback and allow for faster iteration.
+`}),e.add({id:179,href:"/2020/06/09/stateful-functions-2.1.0-release-announcement/",title:"Stateful Functions 2.1.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is happy to announce the release of Stateful Functions (StateFun) 2.1.0! This release introduces new features around state expiration and performance improvements for co-located deployments, as well as other important changes that improve the stability and testability of the project. As the community around StateFun grows, the release cycle will follow this pattern of smaller and more frequent releases to incorporate user feedback and allow for faster iteration.
 The binary distribution and source artifacts are now available on the updated Downloads page of the Flink website, and the most recent Python SDK distribution is available on PyPI. For more details, check the complete release changelog and the updated documentation. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA!
 New Features and Improvements # Support for State Time-To-Live (TTL) # Being able to define state expiration and a state cleanup strategy is a useful feature for stateful applications — for example, to keep state size from growing indefinitely or to work with sensitive data. In previous StateFun versions, users could implement this behavior manually using delayed messages as state expiration callbacks. For StateFun 2.1, the community has worked on enabling users to configure any persisted state to expire and be purged after a given duration (i.e. the state time-to-live) (FLINK-17644, FLINK-17875).
 Persisted state can be configured to expire after the last write operation (AFTER_WRITE) or after the last read or write operation (AFTER_READ_AND_WRITE). For the Java SDK, users can configure State TTL in the definition of their persisted fields:
@@ -3455,13 +3465,13 @@
 List of Contributors # The Apache Flink community would like to thank all contributors that have made this release possible:
 abc863377, Authuir, Chesnay Schepler, Congxian Qiu, David Anderson, Dian Fu, Francesco Guardiani, Igal Shilman, Marta Paes Moreira, Patrick Wiener, Rafi Aroch, Seth Wiesman, Stephan Ewen, Tzu-Li (Gordon) Tai
 If you’d like to get involved, we’re always looking for new contributors — especially around SDKs for other languages like Go, Rust or Javascript.
-`}),e.add({id:179,href:"/2020/05/12/apache-flink-1.10.1-released/",title:"Apache Flink 1.10.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix version of the Apache Flink 1.10 series.
+`}),e.add({id:180,href:"/2020/05/12/apache-flink-1.10.1-released/",title:"Apache Flink 1.10.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix version of the Apache Flink 1.10 series.
 This release includes 158 fixes and minor improvements for Flink 1.10.0. The list below includes a detailed list of all fixes and improvements.
 We highly recommend all users to upgrade to Flink 1.10.1.
 Note FLINK-16684 changed the builders of the StreamingFileSink to make them compilable in Scala. This change is source compatible but binary incompatible. If using the StreamingFileSink, please recompile your user code against 1.10.1 before upgrading. Note FLINK-16683 Flink no longer supports starting clusters with .bat scripts. Users should instead use environments like WSL or Cygwin and work with the .sh scripts. Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.10.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.10.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.10.1&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-14126] - Elasticsearch Xpack Machine Learning doesn&#39;t support ARM [FLINK-15143] - Create document for FLIP-49 TM memory model and configuration guide [FLINK-15561] - Unify Kerberos credentials checking [FLINK-15790] - Make FlinkKubeClient and its implementations asynchronous [FLINK-15817] - Kubernetes Resource leak while deployment exception happens [FLINK-16049] - Remove outdated &quot;Best Practices&quot; section from Application Development Section [FLINK-16131] - Translate &quot;Amazon S3&quot; page of &quot;File Systems&quot; into Chinese [FLINK-16389] - Bump Kafka 0.10 to 0.10.2.2 Bug [FLINK-2336] - ArrayIndexOufOBoundsException in TypeExtractor when mapping [FLINK-10918] - incremental Keyed state with RocksDB throws cannot create directory error in windows [FLINK-11193] - Rocksdb timer service factory configuration option is not settable per job [FLINK-13483] - PrestoS3FileSystemITCase.testDirectoryListing fails on Travis [FLINK-14038] - ExecutionGraph deploy failed due to akka timeout [FLINK-14311] - Streaming File Sink end-to-end test failed on Travis [FLINK-14316] - Stuck in &quot;Job leader ... 
lost leadership&quot; error [FLINK-15417] - Remove the docker volume or mount when starting Mesos e2e cluster [FLINK-15669] - SQL client can&#39;t cancel flink job [FLINK-15772] - Shaded Hadoop S3A with credentials provider end-to-end test fails on travis [FLINK-15811] - StreamSourceOperatorWatermarksTest.testNoMaxWatermarkOnAsyncCancel fails on Travis [FLINK-15812] - HistoryServer archiving is done in Dispatcher main thread [FLINK-15838] - Dangling CountDownLatch.await(timeout) [FLINK-15852] - Job is submitted to the wrong session cluster [FLINK-15904] - Make Kafka Consumer work with activated &quot;disableGenericTypes()&quot; [FLINK-15936] - TaskExecutorTest#testSlotAcceptance deadlocks [FLINK-15953] - Job Status is hard to read for some Statuses [FLINK-16013] - List and map config options could not be parsed correctly [FLINK-16014] - S3 plugin ClassNotFoundException SAXParser [FLINK-16025] - Service could expose blob server port mismatched with JM Container [FLINK-16026] - Travis failed due to python setup [FLINK-16047] - Blink planner produces wrong aggregate results with state clean up [FLINK-16067] - Flink&#39;s CalciteParser swallows error position information [FLINK-16068] - table with keyword-escaped columns and computed_column_expression columns [FLINK-16070] - Blink planner can not extract correct unique key for UpsertStreamTableSink [FLINK-16108] - StreamSQLExample is failed if running in blink planner [FLINK-16111] - Kubernetes deployment does not respect &quot;taskmanager.cpu.cores&quot;. 
[FLINK-16113] - ExpressionReducer shouldn&#39;t escape the reduced string value [FLINK-16115] - Aliyun oss filesystem could not work with plugin mechanism [FLINK-16139] - Co-location constraints are not reset on task recovery in DefaultScheduler [FLINK-16161] - Statistics zero should be unknown in HiveCatalog [FLINK-16170] - SearchTemplateRequest ClassNotFoundException when use flink-sql-connector-elasticsearch7 [FLINK-16220] - JsonRowSerializationSchema throws cast exception : NullNode cannot be cast to ArrayNode [FLINK-16231] - Hive connector is missing jdk.tools exclusion against Hive 2.x.x [FLINK-16234] - Fix unstable cases in StreamingJobGraphGeneratorTest [FLINK-16241] - Remove the license and notice file in flink-ml-lib module on release-1.10 branch [FLINK-16242] - BinaryGeneric serialization error cause checkpoint failure [FLINK-16262] - Class loader problem with FlinkKafkaProducer.Semantic.EXACTLY_ONCE and usrlib directory [FLINK-16269] - Generic type can not be matched when convert table to stream. [FLINK-16281] - parameter &#39;maxRetryTimes&#39; can not work in JDBCUpsertTableSink [FLINK-16301] - Annoying &quot;Cannot find FunctionDefinition&quot; messages with SQL for f_proctime or = [FLINK-16308] - SQL connector download links are broken [FLINK-16313] - flink-state-processor-api: surefire execution unstable on Azure [FLINK-16331] - Remove source licenses for old WebUI [FLINK-16345] - Computed column can not refer time attribute column [FLINK-16360] - connector on hive 2.0.1 don&#39;t support type conversion from STRING to VARCHAR [FLINK-16371] - HadoopCompressionBulkWriter fails with &#39;java.io.NotSerializableException&#39; [FLINK-16373] - EmbeddedLeaderService: IllegalStateException: The RPC connection is already closed [FLINK-16413] - Reduce hive source parallelism when limit push down [FLINK-16414] - create udaf/udtf function using sql casuing ValidationException: SQL validation failed. 
null [FLINK-16433] - TableEnvironmentImpl doesn&#39;t clear buffered operations when it fails to translate the operation [FLINK-16435] - Replace since decorator with versionadd to mark the version an API was introduced [FLINK-16467] - MemorySizeTest#testToHumanReadableString() is not portable [FLINK-16526] - Fix exception when computed column expression references a keyword column name [FLINK-16541] - Document of table.exec.shuffle-mode is incorrect [FLINK-16550] - HadoopS3* tests fail with NullPointerException exceptions [FLINK-16560] - Forward Configuration in PackagedProgramUtils#getPipelineFromProgram [FLINK-16567] - Get the API error of the StreamQueryConfig on Page &quot;Query Configuration&quot; [FLINK-16573] - Kinesis consumer does not properly shutdown RecordFetcher threads [FLINK-16576] - State inconsistency on restore with memory state backends [FLINK-16626] - Prevent REST handler from being closed more than once [FLINK-16632] - SqlDateTimeUtils#toSqlTimestamp(String, String) may yield incorrect result [FLINK-16635] - Incompatible okio dependency in flink-metrics-influxdb module [FLINK-16646] - flink read orc file throw a NullPointerException [FLINK-16647] - Miss file extension when inserting to hive table with compression [FLINK-16652] - BytesColumnVector should init buffer in Hive 3.x [FLINK-16662] - Blink Planner failed to generate JobGraph for POJO DataStream converting to Table (Cannot determine simple type name) [FLINK-16664] - Unable to set DataStreamSource parallelism to default (-1) [FLINK-16675] - TableEnvironmentITCase. testClearOperation fails on travis nightly build [FLINK-16684] - StreamingFileSink builder does not work with Scala [FLINK-16696] - Savepoint trigger documentation is insufficient [FLINK-16703] - AkkaRpcActor state machine does not record transition to terminating state. 
[FLINK-16705] - LocalExecutor tears down MiniCluster before client can retrieve JobResult [FLINK-16718] - KvStateServerHandlerTest leaks Netty ByteBufs [FLINK-16727] - Fix cast exception when having time point literal as parameters [FLINK-16732] - Failed to call Hive UDF with constant return value [FLINK-16740] - OrcSplitReaderUtil::logicalTypeToOrcType fails to create decimal type with precision &lt; 10 [FLINK-16759] - HiveModuleTest failed to compile on release-1.10 [FLINK-16767] - Failed to read Hive table with RegexSerDe [FLINK-16771] - NPE when filtering by decimal column [FLINK-16821] - Run Kubernetes test failed with invalid named &quot;minikube&quot; [FLINK-16822] - The config set by SET command does not work [FLINK-16825] - PrometheusReporterEndToEndITCase should rely on path returned by DownloadCache [FLINK-16836] - Losing leadership does not clear rpc connection in JobManagerLeaderListener [FLINK-16860] - Failed to push filter into OrcTableSource when upgrading to 1.9.2 [FLINK-16888] - Re-add jquery license file under &quot;/licenses&quot; [FLINK-16901] - Flink Kinesis connector NOTICE should have contents of AWS KPL&#39;s THIRD_PARTY_NOTICES file manually merged in [FLINK-16913] - ReadableConfigToConfigurationAdapter#getEnum throws UnsupportedOperationException [FLINK-16916] - The logic of NullableSerializer#copy is wrong [FLINK-16944] - Compile error in. 
DumpCompiledPlanTest and PreviewPlanDumpTest [FLINK-16980] - Python UDF doesn&#39;t work with protobuf 3.6.1 [FLINK-16981] - flink-runtime tests are crashing the JVM on Java11 because of PowerMock [FLINK-17062] - Fix the conversion from Java row type to Python row type [FLINK-17066] - Update pyarrow version bounds less than 0.14.0 [FLINK-17093] - Python UDF doesn&#39;t work when the input column is from composite field [FLINK-17107] - CheckpointCoordinatorConfiguration#isExactlyOnce() is inconsistent with StreamConfig#getCheckpointMode() [FLINK-17114] - When the pyflink job runs in local mode and the command &quot;python&quot; points to Python 2.7, the startup of the Python UDF worker will fail. [FLINK-17124] - The PyFlink Job runs into infinite loop if the Python UDF imports job code [FLINK-17152] - FunctionDefinitionUtil generate wrong resultType and acc type of AggregateFunctionDefinition [FLINK-17308] - ExecutionGraphCache cachedExecutionGraphs not cleanup cause OOM Bug [FLINK-17313] - Validation error when insert decimal/varchar with precision into sink using TypeInformation of row [FLINK-17334] - Flink does not support HIVE UDFs with primitive return types [FLINK-17338] - LocalExecutorITCase.testBatchQueryCancel test timeout [FLINK-17359] - Entropy key is not resolved if flink-s3-fs-hadoop is added as a plugin [FLINK-17403] - Fix invalid classpath in BashJavaUtilsITCase [FLINK-17471] - Move LICENSE and NOTICE files to root directory of python distribution [FLINK-17483] - Update flink-sql-connector-elasticsearch7 NOTICE file to correctly reflect bundled dependencies [FLINK-17496] - Performance regression with amazon-kinesis-producer 0.13.1 in Flink 1.10.x [FLINK-17499] - LazyTimerService used to register timers via State Processing API incorrectly mixes event time timers with processing time timers [FLINK-17514] - TaskCancelerWatchdog does not kill TaskManager New Feature [FLINK-17275] - Add core training exercises Improvement [FLINK-9656] - Environment java 
opts for flink run [FLINK-15094] - Warning about using private constructor of java.nio.DirectByteBuffer in Java 11 [FLINK-15584] - Give nested data type of ROWs in ValidationException [FLINK-15616] - Move boot error messages from python-udf-boot.log to taskmanager&#39;s log file [FLINK-15989] - Rewrap OutOfMemoryError in allocateUnpooledOffHeap with better message [FLINK-16018] - Improve error reporting when submitting batch job (instead of AskTimeoutException) [FLINK-16125] - Make zookeeper.connect optional for Kafka connectors [FLINK-16167] - Update documentation about python shell execution [FLINK-16191] - Improve error message on Windows when RocksDB Paths are too long [FLINK-16280] - Fix sample code errors in the documentation about elasticsearch connector [FLINK-16288] - Setting the TTL for discarding task pods on Kubernetes. [FLINK-16293] - Document using plugins in Kubernetes [FLINK-16343] - Improve exception message when reading an unbounded source in batch mode [FLINK-16406] - Increase default value for JVM Metaspace to minimise its OutOfMemoryError [FLINK-16538] - Restructure Python Table API documentation [FLINK-16604] - Column key in JM configuration is too narrow [FLINK-16683] - Remove scripts for starting Flink on Windows [FLINK-16697] - Disable JMX rebinding [FLINK-16763] - Should not use BatchTableEnvironment for Python UDF in the document of flink-1.10 [FLINK-16772] - Bump derby to 10.12.1.1+ or exclude it [FLINK-16790] - enables the interpretation of backslash escapes [FLINK-16862] - Remove example url in quickstarts [FLINK-16874] - Respect the dynamic options when calculating memory options in taskmanager.sh [FLINK-16942] - ES 5 sink should allow users to select netty transport client [FLINK-17065] - Add documentation about the Python versions supported for PyFlink [FLINK-17125] - Add a Usage Notes Page to Answer Common Questions Encountered by PyFlink Users [FLINK-17254] - Improve the PyFlink documentation and examples to use SQL DDL for 
source/sink definition [FLINK-17276] - Add checkstyle to training exercises [FLINK-17277] - Apply IntelliJ recommendations to training exercises [FLINK-17278] - Add Travis to the training exercises [FLINK-17279] - Use gradle build scans for training exercises [FLINK-17316] - Have HourlyTips solutions use TumblingEventTimeWindows.of Task [FLINK-15741] - Fix TTL docs after enabling RocksDB compaction filter by default (needs Chinese translation) [FLINK-15933] - update content of how generic table schema is stored in hive via HiveCatalog [FLINK-15991] - Create Chinese documentation for FLIP-49 TM memory model [FLINK-16004] - Exclude flink-rocksdb-state-memory-control-test jars from the dist [FLINK-16454] - Update the copyright year in NOTICE files [FLINK-16530] - Add documentation about &quot;GROUPING SETS&quot; and &quot;CUBE&quot; support in streaming mode [FLINK-16592] - The doc of Streaming File Sink has a mistake of grammar `}),e.add({id:180,href:"/2020/05/06/flink-community-update-may20/",title:"Flink Community Update - May'20",section:"Flink Blog",content:`Can you smell it? It’s release month! It took a while, but now that we’re all caught up with the past, the Community Update is here to stay. This time around, we’re warming up for Flink 1.11 and peeping back to the month of April in the Flink community — with the release of Stateful Functions 2.0, a new self-paced Flink training and some efforts to improve the Flink documentation experience.
+Sub-task [FLINK-14126] - Elasticsearch Xpack Machine Learning doesn&#39;t support ARM [FLINK-15143] - Create document for FLIP-49 TM memory model and configuration guide [FLINK-15561] - Unify Kerberos credentials checking [FLINK-15790] - Make FlinkKubeClient and its implementations asynchronous [FLINK-15817] - Kubernetes Resource leak while deployment exception happens [FLINK-16049] - Remove outdated &quot;Best Practices&quot; section from Application Development Section [FLINK-16131] - Translate &quot;Amazon S3&quot; page of &quot;File Systems&quot; into Chinese [FLINK-16389] - Bump Kafka 0.10 to 0.10.2.2 Bug [FLINK-2336] - ArrayIndexOufOBoundsException in TypeExtractor when mapping [FLINK-10918] - incremental Keyed state with RocksDB throws cannot create directory error in windows [FLINK-11193] - Rocksdb timer service factory configuration option is not settable per job [FLINK-13483] - PrestoS3FileSystemITCase.testDirectoryListing fails on Travis [FLINK-14038] - ExecutionGraph deploy failed due to akka timeout [FLINK-14311] - Streaming File Sink end-to-end test failed on Travis [FLINK-14316] - Stuck in &quot;Job leader ... 
lost leadership&quot; error [FLINK-15417] - Remove the docker volume or mount when starting Mesos e2e cluster [FLINK-15669] - SQL client can&#39;t cancel flink job [FLINK-15772] - Shaded Hadoop S3A with credentials provider end-to-end test fails on travis [FLINK-15811] - StreamSourceOperatorWatermarksTest.testNoMaxWatermarkOnAsyncCancel fails on Travis [FLINK-15812] - HistoryServer archiving is done in Dispatcher main thread [FLINK-15838] - Dangling CountDownLatch.await(timeout) [FLINK-15852] - Job is submitted to the wrong session cluster [FLINK-15904] - Make Kafka Consumer work with activated &quot;disableGenericTypes()&quot; [FLINK-15936] - TaskExecutorTest#testSlotAcceptance deadlocks [FLINK-15953] - Job Status is hard to read for some Statuses [FLINK-16013] - List and map config options could not be parsed correctly [FLINK-16014] - S3 plugin ClassNotFoundException SAXParser [FLINK-16025] - Service could expose blob server port mismatched with JM Container [FLINK-16026] - Travis failed due to python setup [FLINK-16047] - Blink planner produces wrong aggregate results with state clean up [FLINK-16067] - Flink&#39;s CalciteParser swallows error position information [FLINK-16068] - table with keyword-escaped columns and computed_column_expression columns [FLINK-16070] - Blink planner can not extract correct unique key for UpsertStreamTableSink [FLINK-16108] - StreamSQLExample is failed if running in blink planner [FLINK-16111] - Kubernetes deployment does not respect &quot;taskmanager.cpu.cores&quot;. 
[FLINK-16113] - ExpressionReducer shouldn&#39;t escape the reduced string value [FLINK-16115] - Aliyun oss filesystem could not work with plugin mechanism [FLINK-16139] - Co-location constraints are not reset on task recovery in DefaultScheduler [FLINK-16161] - Statistics zero should be unknown in HiveCatalog [FLINK-16170] - SearchTemplateRequest ClassNotFoundException when use flink-sql-connector-elasticsearch7 [FLINK-16220] - JsonRowSerializationSchema throws cast exception : NullNode cannot be cast to ArrayNode [FLINK-16231] - Hive connector is missing jdk.tools exclusion against Hive 2.x.x [FLINK-16234] - Fix unstable cases in StreamingJobGraphGeneratorTest [FLINK-16241] - Remove the license and notice file in flink-ml-lib module on release-1.10 branch [FLINK-16242] - BinaryGeneric serialization error cause checkpoint failure [FLINK-16262] - Class loader problem with FlinkKafkaProducer.Semantic.EXACTLY_ONCE and usrlib directory [FLINK-16269] - Generic type can not be matched when convert table to stream. [FLINK-16281] - parameter &#39;maxRetryTimes&#39; can not work in JDBCUpsertTableSink [FLINK-16301] - Annoying &quot;Cannot find FunctionDefinition&quot; messages with SQL for f_proctime or = [FLINK-16308] - SQL connector download links are broken [FLINK-16313] - flink-state-processor-api: surefire execution unstable on Azure [FLINK-16331] - Remove source licenses for old WebUI [FLINK-16345] - Computed column can not refer time attribute column [FLINK-16360] - connector on hive 2.0.1 don&#39;t support type conversion from STRING to VARCHAR [FLINK-16371] - HadoopCompressionBulkWriter fails with &#39;java.io.NotSerializableException&#39; [FLINK-16373] - EmbeddedLeaderService: IllegalStateException: The RPC connection is already closed [FLINK-16413] - Reduce hive source parallelism when limit push down [FLINK-16414] - create udaf/udtf function using sql casuing ValidationException: SQL validation failed. 
null [FLINK-16433] - TableEnvironmentImpl doesn&#39;t clear buffered operations when it fails to translate the operation [FLINK-16435] - Replace since decorator with versionadd to mark the version an API was introduced [FLINK-16467] - MemorySizeTest#testToHumanReadableString() is not portable [FLINK-16526] - Fix exception when computed column expression references a keyword column name [FLINK-16541] - Document of table.exec.shuffle-mode is incorrect [FLINK-16550] - HadoopS3* tests fail with NullPointerException exceptions [FLINK-16560] - Forward Configuration in PackagedProgramUtils#getPipelineFromProgram [FLINK-16567] - Get the API error of the StreamQueryConfig on Page &quot;Query Configuration&quot; [FLINK-16573] - Kinesis consumer does not properly shutdown RecordFetcher threads [FLINK-16576] - State inconsistency on restore with memory state backends [FLINK-16626] - Prevent REST handler from being closed more than once [FLINK-16632] - SqlDateTimeUtils#toSqlTimestamp(String, String) may yield incorrect result [FLINK-16635] - Incompatible okio dependency in flink-metrics-influxdb module [FLINK-16646] - flink read orc file throw a NullPointerException [FLINK-16647] - Miss file extension when inserting to hive table with compression [FLINK-16652] - BytesColumnVector should init buffer in Hive 3.x [FLINK-16662] - Blink Planner failed to generate JobGraph for POJO DataStream converting to Table (Cannot determine simple type name) [FLINK-16664] - Unable to set DataStreamSource parallelism to default (-1) [FLINK-16675] - TableEnvironmentITCase. testClearOperation fails on travis nightly build [FLINK-16684] - StreamingFileSink builder does not work with Scala [FLINK-16696] - Savepoint trigger documentation is insufficient [FLINK-16703] - AkkaRpcActor state machine does not record transition to terminating state. 
[FLINK-16705] - LocalExecutor tears down MiniCluster before client can retrieve JobResult [FLINK-16718] - KvStateServerHandlerTest leaks Netty ByteBufs [FLINK-16727] - Fix cast exception when having time point literal as parameters [FLINK-16732] - Failed to call Hive UDF with constant return value [FLINK-16740] - OrcSplitReaderUtil::logicalTypeToOrcType fails to create decimal type with precision &lt; 10 [FLINK-16759] - HiveModuleTest failed to compile on release-1.10 [FLINK-16767] - Failed to read Hive table with RegexSerDe [FLINK-16771] - NPE when filtering by decimal column [FLINK-16821] - Run Kubernetes test failed with invalid named &quot;minikube&quot; [FLINK-16822] - The config set by SET command does not work [FLINK-16825] - PrometheusReporterEndToEndITCase should rely on path returned by DownloadCache [FLINK-16836] - Losing leadership does not clear rpc connection in JobManagerLeaderListener [FLINK-16860] - Failed to push filter into OrcTableSource when upgrading to 1.9.2 [FLINK-16888] - Re-add jquery license file under &quot;/licenses&quot; [FLINK-16901] - Flink Kinesis connector NOTICE should have contents of AWS KPL&#39;s THIRD_PARTY_NOTICES file manually merged in [FLINK-16913] - ReadableConfigToConfigurationAdapter#getEnum throws UnsupportedOperationException [FLINK-16916] - The logic of NullableSerializer#copy is wrong [FLINK-16944] - Compile error in. 
DumpCompiledPlanTest and PreviewPlanDumpTest [FLINK-16980] - Python UDF doesn&#39;t work with protobuf 3.6.1 [FLINK-16981] - flink-runtime tests are crashing the JVM on Java11 because of PowerMock [FLINK-17062] - Fix the conversion from Java row type to Python row type [FLINK-17066] - Update pyarrow version bounds less than 0.14.0 [FLINK-17093] - Python UDF doesn&#39;t work when the input column is from composite field [FLINK-17107] - CheckpointCoordinatorConfiguration#isExactlyOnce() is inconsistent with StreamConfig#getCheckpointMode() [FLINK-17114] - When the pyflink job runs in local mode and the command &quot;python&quot; points to Python 2.7, the startup of the Python UDF worker will fail. [FLINK-17124] - The PyFlink Job runs into infinite loop if the Python UDF imports job code [FLINK-17152] - FunctionDefinitionUtil generate wrong resultType and acc type of AggregateFunctionDefinition [FLINK-17308] - ExecutionGraphCache cachedExecutionGraphs not cleanup cause OOM Bug [FLINK-17313] - Validation error when insert decimal/varchar with precision into sink using TypeInformation of row [FLINK-17334] - Flink does not support HIVE UDFs with primitive return types [FLINK-17338] - LocalExecutorITCase.testBatchQueryCancel test timeout [FLINK-17359] - Entropy key is not resolved if flink-s3-fs-hadoop is added as a plugin [FLINK-17403] - Fix invalid classpath in BashJavaUtilsITCase [FLINK-17471] - Move LICENSE and NOTICE files to root directory of python distribution [FLINK-17483] - Update flink-sql-connector-elasticsearch7 NOTICE file to correctly reflect bundled dependencies [FLINK-17496] - Performance regression with amazon-kinesis-producer 0.13.1 in Flink 1.10.x [FLINK-17499] - LazyTimerService used to register timers via State Processing API incorrectly mixes event time timers with processing time timers [FLINK-17514] - TaskCancelerWatchdog does not kill TaskManager New Feature [FLINK-17275] - Add core training exercises Improvement [FLINK-9656] - Environment java 
opts for flink run [FLINK-15094] - Warning about using private constructor of java.nio.DirectByteBuffer in Java 11 [FLINK-15584] - Give nested data type of ROWs in ValidationException [FLINK-15616] - Move boot error messages from python-udf-boot.log to taskmanager&#39;s log file [FLINK-15989] - Rewrap OutOfMemoryError in allocateUnpooledOffHeap with better message [FLINK-16018] - Improve error reporting when submitting batch job (instead of AskTimeoutException) [FLINK-16125] - Make zookeeper.connect optional for Kafka connectors [FLINK-16167] - Update documentation about python shell execution [FLINK-16191] - Improve error message on Windows when RocksDB Paths are too long [FLINK-16280] - Fix sample code errors in the documentation about elasticsearch connector [FLINK-16288] - Setting the TTL for discarding task pods on Kubernetes. [FLINK-16293] - Document using plugins in Kubernetes [FLINK-16343] - Improve exception message when reading an unbounded source in batch mode [FLINK-16406] - Increase default value for JVM Metaspace to minimise its OutOfMemoryError [FLINK-16538] - Restructure Python Table API documentation [FLINK-16604] - Column key in JM configuration is too narrow [FLINK-16683] - Remove scripts for starting Flink on Windows [FLINK-16697] - Disable JMX rebinding [FLINK-16763] - Should not use BatchTableEnvironment for Python UDF in the document of flink-1.10 [FLINK-16772] - Bump derby to 10.12.1.1+ or exclude it [FLINK-16790] - enables the interpretation of backslash escapes [FLINK-16862] - Remove example url in quickstarts [FLINK-16874] - Respect the dynamic options when calculating memory options in taskmanager.sh [FLINK-16942] - ES 5 sink should allow users to select netty transport client [FLINK-17065] - Add documentation about the Python versions supported for PyFlink [FLINK-17125] - Add a Usage Notes Page to Answer Common Questions Encountered by PyFlink Users [FLINK-17254] - Improve the PyFlink documentation and examples to use SQL DDL for 
source/sink definition [FLINK-17276] - Add checkstyle to training exercises [FLINK-17277] - Apply IntelliJ recommendations to training exercises [FLINK-17278] - Add Travis to the training exercises [FLINK-17279] - Use gradle build scans for training exercises [FLINK-17316] - Have HourlyTips solutions use TumblingEventTimeWindows.of Task [FLINK-15741] - Fix TTL docs after enabling RocksDB compaction filter by default (needs Chinese translation) [FLINK-15933] - update content of how generic table schema is stored in hive via HiveCatalog [FLINK-15991] - Create Chinese documentation for FLIP-49 TM memory model [FLINK-16004] - Exclude flink-rocksdb-state-memory-control-test jars from the dist [FLINK-16454] - Update the copyright year in NOTICE files [FLINK-16530] - Add documentation about &quot;GROUPING SETS&quot; and &quot;CUBE&quot; support in streaming mode [FLINK-16592] - The doc of Streaming File Sink has a mistake of grammar `}),e.add({id:181,href:"/2020/05/06/flink-community-update-may20/",title:"Flink Community Update - May'20",section:"Flink Blog",content:`Can you smell it? It’s release month! It took a while, but now that we’re all caught up with the past, the Community Update is here to stay. This time around, we’re warming up for Flink 1.11 and peeping back to the month of April in the Flink community — with the release of Stateful Functions 2.0, a new self-paced Flink training and some efforts to improve the Flink documentation experience.
 Last month also marked the debut of Flink Forward Virtual Conference 2020: what did you think? If you missed it altogether or just want to recap some of the sessions, the videos and slides are now available!
 The Past Month in Flink # Flink Stateful Functions 2.0 is out! # In the beginning of April, the Flink community announced the release of Stateful Functions 2.0 — the first as part of the Apache Flink project. From this release, you can use Flink as the base of a (stateful) serverless platform with out-of-the-box consistent and scalable state, and efficient messaging between functions. You can even run your stateful functions on platforms like AWS Lambda, as Gordon (@tzulitai) demonstrated in his Flink Forward talk.
 It’s been encouraging to see so many questions about Stateful Functions popping up in the mailing list and Stack Overflow! If you’d like to get involved, we’re always looking for new contributors — especially around SDKs for other languages like Go, Javascript and Rust.
@@ -3491,7 +3501,7 @@
 If you’re interested in learning more about this project idea or want to get involved in GSoD as a technical writer, check out the announcement blogpost.
 &hellip;and something to read! # Events across the globe have pretty much come to a halt, so we’ll leave you with some interesting resources to read and explore instead. In addition to this written content, you can also recap the sessions from the Flink Forward Virtual Conference!
 Type Links Blogposts Event-Driven Supply Chain for Crisis with FlinkSQL and Zeppelin Memory Management Improvements with Apache Flink 1.10 Flink Serialization Tuning Vol. 1: Choosing your Serializer — if you can Tutorials PyFlink: Introducing Python Support for UDFs in Flink's Table API Flink Stateful Functions: where to start? Flink Packages Flink Packages is a website where you can explore (and contribute to) the Flink ecosystem of connectors, extensions, APIs, tools and integrations. New in: Spillable State Backend for Flink Flink Memory Calculator Ververica Platform Community Edition If you’d like to keep a closer eye on what’s happening in the community, subscribe to the Flink @community mailing list to get fine-grained weekly updates, upcoming event announcements and more.
-`}),e.add({id:181,href:"/2020/05/04/applying-to-google-season-of-docs-2020/",title:"Applying to Google Season of Docs 2020",section:"Flink Blog",content:`The Flink community is thrilled to share that the project is applying again to Google Season of Docs (GSoD) this year! If you’re unfamiliar with the program, GSoD is a great initiative organized by Google Open Source to pair technical writers with mentors to work on documentation for open source projects. The first edition supported over 40 projects, including some other cool Apache Software Foundation (ASF) members like Apache Airflow and Apache Cassandra.
+`}),e.add({id:182,href:"/2020/05/04/applying-to-google-season-of-docs-2020/",title:"Applying to Google Season of Docs 2020",section:"Flink Blog",content:`The Flink community is thrilled to share that the project is applying again to Google Season of Docs (GSoD) this year! If you’re unfamiliar with the program, GSoD is a great initiative organized by Google Open Source to pair technical writers with mentors to work on documentation for open source projects. The first edition supported over 40 projects, including some other cool Apache Software Foundation (ASF) members like Apache Airflow and Apache Cassandra.
 Why Apply? # As one of the most active projects in the ASF, Flink is experiencing a boom in contributions and some major changes to its codebase. And, while the project has also seen a significant increase in activity when it comes to writing, reviewing and translating documentation, it’s hard to keep up with the pace.
 Since last year, the community has been working on FLIP-42 to improve the documentation experience and bring a more accessible information architecture to Flink. After some discussion, we agreed that GSoD would be a valuable opportunity to double down on this effort and collaborate with someone who is passionate about technical writing&hellip;and Flink!
 How can you contribute? # If working shoulder to shoulder with the Flink community on documentation sounds exciting, we’d love to hear from you! You can read more about our idea for this year’s project below and, depending on whether it is accepted, apply as a technical writer. If you have any questions or just want to know more about the project idea, ping us at dev@flink.apache.org!
@@ -3508,13 +3518,13 @@
 How to Contribute Documentation: https://flink.apache.org/contributing/contribute-documentation.html
 Documentation Style Guide: https://flink.apache.org/contributing/docs-style.html
 We look forward to receiving feedback on this GSoD application and also to continue improving the documentation experience for Flink users. Join us!
-`}),e.add({id:182,href:"/2020/04/24/apache-flink-1.9.3-released/",title:"Apache Flink 1.9.3 Released",section:"Flink Blog",content:`The Apache Flink community released the third bugfix version of the Apache Flink 1.9 series.
+`}),e.add({id:183,href:"/2020/04/24/apache-flink-1.9.3-released/",title:"Apache Flink 1.9.3 Released",section:"Flink Blog",content:`The Apache Flink community released the third bugfix version of the Apache Flink 1.9 series.
 This release includes 38 fixes and minor improvements for Flink 1.9.2. The list below includes a detailed list of all fixes and improvements.
 We highly recommend all users to upgrade to Flink 1.9.3.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.9.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.9.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.9.3&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-15143] - Create document for FLIP-49 TM memory model and configuration guide [FLINK-16389] - Bump Kafka 0.10 to 0.10.2.2 Bug [FLINK-11193] - Rocksdb timer service factory configuration option is not settable per job [FLINK-14316] - Stuck in &quot;Job leader ... lost leadership&quot; error [FLINK-14560] - The value of taskmanager.memory.size in flink-conf.yaml is set to zero will cause taskmanager not to work [FLINK-15010] - Temp directories flink-netty-shuffle-* are not cleaned up [FLINK-15085] - HistoryServer dashboard config json out of sync [FLINK-15386] - SingleJobSubmittedJobGraphStore.putJobGraph has a logic error [FLINK-15575] - Azure Filesystem Shades Wrong Package &quot;httpcomponents&quot; [FLINK-15638] - releasing/create_release_branch.sh does not set version in flink-python/pyflink/version.py [FLINK-15812] - HistoryServer archiving is done in Dispatcher main thread [FLINK-15844] - Removal of JobWithJars.buildUserCodeClassLoader method without Configuration breaks backwards compatibility [FLINK-15863] - Fix docs stating that savepoints are relocatable [FLINK-16047] - Blink planner produces wrong aggregate results with state clean up [FLINK-16242] - BinaryGeneric serialization error cause checkpoint failure [FLINK-16308] - SQL connector download links are broken [FLINK-16373] - EmbeddedLeaderService: IllegalStateException: The RPC connection is already closed [FLINK-16573] - Kinesis consumer does not properly shutdown RecordFetcher threads [FLINK-16576] - State inconsistency on restore with memory state backends [FLINK-16696] - Savepoint trigger documentation is insufficient [FLINK-16703] - AkkaRpcActor state machine does not record transition to terminating state. 
[FLINK-16836] - Losing leadership does not clear rpc connection in JobManagerLeaderListener [FLINK-16860] - Failed to push filter into OrcTableSource when upgrading to 1.9.2 [FLINK-16916] - The logic of NullableSerializer#copy is wrong [FLINK-17062] - Fix the conversion from Java row type to Python row type Improvement [FLINK-14278] - Pass in ioExecutor into AbstractDispatcherResourceManagerComponentFactory [FLINK-15908] - Add description of support &#39;pip install&#39; to 1.9.x documents [FLINK-15909] - Add PyPI release process into the subsequent release of 1.9.x [FLINK-15938] - Idle state not cleaned in StreamingJoinOperator and StreamingSemiAntiJoinOperator [FLINK-16018] - Improve error reporting when submitting batch job (instead of AskTimeoutException) [FLINK-16031] - Improve the description in the README file of PyFlink 1.9.x [FLINK-16167] - Update documentation about python shell execution [FLINK-16280] - Fix sample code errors in the documentation about elasticsearch connector [FLINK-16697] - Disable JMX rebinding [FLINK-16862] - Remove example url in quickstarts [FLINK-16942] - ES 5 sink should allow users to select netty transport client Task [FLINK-11767] - Introduce new TypeSerializerUpgradeTestBase, new PojoSerializerUpgradeTest [FLINK-16454] - Update the copyright year in NOTICE files `}),e.add({id:183,href:"/2020/04/21/memory-management-improvements-with-apache-flink-1.10/",title:"Memory Management Improvements with Apache Flink 1.10",section:"Flink Blog",content:`Apache Flink 1.10 comes with significant changes to the memory model of the Task Managers and configuration options for your Flink applications. These recently-introduced changes make Flink more adaptable to all kinds of deployment environments (e.g. Kubernetes, Yarn, Mesos), providing strict control over its memory consumption. 
In this post, we describe Flink’s memory model, as it stands in Flink 1.10, how to set up and manage memory consumption of your Flink applications and the recent changes the community implemented in the latest Apache Flink release.
+Sub-task [FLINK-15143] - Create document for FLIP-49 TM memory model and configuration guide [FLINK-16389] - Bump Kafka 0.10 to 0.10.2.2 Bug [FLINK-11193] - Rocksdb timer service factory configuration option is not settable per job [FLINK-14316] - Stuck in &quot;Job leader ... lost leadership&quot; error [FLINK-14560] - The value of taskmanager.memory.size in flink-conf.yaml is set to zero will cause taskmanager not to work [FLINK-15010] - Temp directories flink-netty-shuffle-* are not cleaned up [FLINK-15085] - HistoryServer dashboard config json out of sync [FLINK-15386] - SingleJobSubmittedJobGraphStore.putJobGraph has a logic error [FLINK-15575] - Azure Filesystem Shades Wrong Package &quot;httpcomponents&quot; [FLINK-15638] - releasing/create_release_branch.sh does not set version in flink-python/pyflink/version.py [FLINK-15812] - HistoryServer archiving is done in Dispatcher main thread [FLINK-15844] - Removal of JobWithJars.buildUserCodeClassLoader method without Configuration breaks backwards compatibility [FLINK-15863] - Fix docs stating that savepoints are relocatable [FLINK-16047] - Blink planner produces wrong aggregate results with state clean up [FLINK-16242] - BinaryGeneric serialization error cause checkpoint failure [FLINK-16308] - SQL connector download links are broken [FLINK-16373] - EmbeddedLeaderService: IllegalStateException: The RPC connection is already closed [FLINK-16573] - Kinesis consumer does not properly shutdown RecordFetcher threads [FLINK-16576] - State inconsistency on restore with memory state backends [FLINK-16696] - Savepoint trigger documentation is insufficient [FLINK-16703] - AkkaRpcActor state machine does not record transition to terminating state. 
[FLINK-16836] - Losing leadership does not clear rpc connection in JobManagerLeaderListener [FLINK-16860] - Failed to push filter into OrcTableSource when upgrading to 1.9.2 [FLINK-16916] - The logic of NullableSerializer#copy is wrong [FLINK-17062] - Fix the conversion from Java row type to Python row type Improvement [FLINK-14278] - Pass in ioExecutor into AbstractDispatcherResourceManagerComponentFactory [FLINK-15908] - Add description of support &#39;pip install&#39; to 1.9.x documents [FLINK-15909] - Add PyPI release process into the subsequent release of 1.9.x [FLINK-15938] - Idle state not cleaned in StreamingJoinOperator and StreamingSemiAntiJoinOperator [FLINK-16018] - Improve error reporting when submitting batch job (instead of AskTimeoutException) [FLINK-16031] - Improve the description in the README file of PyFlink 1.9.x [FLINK-16167] - Update documentation about python shell execution [FLINK-16280] - Fix sample code errors in the documentation about elasticsearch connector [FLINK-16697] - Disable JMX rebinding [FLINK-16862] - Remove example url in quickstarts [FLINK-16942] - ES 5 sink should allow users to select netty transport client Task [FLINK-11767] - Introduce new TypeSerializerUpgradeTestBase, new PojoSerializerUpgradeTest [FLINK-16454] - Update the copyright year in NOTICE files `}),e.add({id:184,href:"/2020/04/21/memory-management-improvements-with-apache-flink-1.10/",title:"Memory Management Improvements with Apache Flink 1.10",section:"Flink Blog",content:`Apache Flink 1.10 comes with significant changes to the memory model of the Task Managers and configuration options for your Flink applications. These recently-introduced changes make Flink more adaptable to all kinds of deployment environments (e.g. Kubernetes, Yarn, Mesos), providing strict control over its memory consumption. 
In this post, we describe Flink’s memory model, as it stands in Flink 1.10, how to set up and manage memory consumption of your Flink applications and the recent changes the community implemented in the latest Apache Flink release.
 Introduction to Flink’s memory model # Having a clear understanding of Apache Flink’s memory model allows you to manage resources for the various workloads more efficiently. The following diagram illustrates the main memory components in Flink:
 Flink: Total Process Memory The Task Manager process is a JVM process. On a high level, its memory consists of the JVM Heap and Off-Heap memory. These types of memory are consumed by Flink directly or by JVM for its specific purposes (i.e. metaspace etc.). There are two major memory consumers within Flink: the user code of job operator tasks and the framework itself consuming memory for internal data structures, network buffers, etc.
 Please note that the user code has direct access to all memory types: JVM Heap, Direct and Native memory. Therefore, Flink cannot really control its allocation and usage. There are however two types of Off-Heap memory which are consumed by tasks and controlled explicitly by Flink:
@@ -3535,7 +3545,7 @@
 JVM metaspace requires additional memory. If you encounter OutOfMemoryError: Metaspace, Flink provides an option to increase its limit and the JVM will ensure that it is not exceeded.
 JVM requires more internal memory. There is no direct control over certain types of JVM process allocations but Flink provides JVM Overhead options. The options allow declaring an additional amount of memory, anticipated for those allocations and not covered by other options.
 Conclusion # The latest Flink release (Flink 1.10) introduces some significant changes to Flink’s memory configuration, making it possible to manage your application memory and debug Flink significantly better than before. Future developments in this area also include adopting a similar memory model for the job manager process in FLIP-116, so stay tuned for more additions and features in upcoming releases. If you have any suggestions or questions for the community, we encourage you to sign up to the Apache Flink mailing lists and become part of the discussion there.
-`}),e.add({id:184,href:"/2020/04/15/flink-serialization-tuning-vol.-1-choosing-your-serializer-if-you-can/",title:"Flink Serialization Tuning Vol. 1: Choosing your Serializer — if you can",section:"Flink Blog",content:`Almost every Flink job has to exchange data between its operators and since these records may not only be sent to another instance in the same JVM but instead to a separate process, records need to be serialized to bytes first. Similarly, Flink’s off-heap state-backend is based on a local embedded RocksDB instance which is implemented in native C++ code and thus also needs transformation into bytes on every state access. Wire and state serialization alone can easily cost a lot of your job’s performance if not executed correctly and thus, whenever you look into the profiler output of your Flink job, you will most likely see serialization in the top places for using CPU cycles.
+`}),e.add({id:185,href:"/2020/04/15/flink-serialization-tuning-vol.-1-choosing-your-serializer-if-you-can/",title:"Flink Serialization Tuning Vol. 1: Choosing your Serializer — if you can",section:"Flink Blog",content:`Almost every Flink job has to exchange data between its operators and since these records may not only be sent to another instance in the same JVM but instead to a separate process, records need to be serialized to bytes first. Similarly, Flink’s off-heap state-backend is based on a local embedded RocksDB instance which is implemented in native C++ code and thus also needs transformation into bytes on every state access. Wire and state serialization alone can easily cost a lot of your job’s performance if not executed correctly and thus, whenever you look into the profiler output of your Flink job, you will most likely see serialization in the top places for using CPU cycles.
 Since serialization is so crucial to your Flink job, we would like to highlight Flink’s serialization stack in a series of blog posts starting with looking at the different ways Flink can serialize your data types.
 Recap: Flink Serialization # Flink handles data types and serialization with its own type descriptors, generic type extraction, and type serialization framework. We recommend reading through the documentation first in order to be able to follow the arguments we present below. In essence, Flink tries to infer information about your job’s data types for wire and state serialization, and to be able to use grouping, joining, and aggregation operations by referring to individual field names, e.g. stream.keyBy(“ruleId”) or dataSet.join(another).where(&quot;name&quot;).equalTo(&quot;personName&quot;). It also allows optimizations in the serialization format as well as reducing unnecessary de/serializations (mainly in certain Batch operations as well as in the SQL/Table APIs).
 Choice of Serializer # Apache Flink&rsquo;s out-of-the-box serialization can be roughly divided into the following groups:
@@ -3581,7 +3591,7 @@
 Note As with all benchmarks, please bear in mind that these numbers only give a hint on Flink’s serializer performance in a specific scenario. They may be different with your data types but the rough classification is probably the same. If you want to be sure, please verify the results with your data types. You should be able to copy from \`SerializationFrameworkAllBenchmarks.java\` to set up your own micro-benchmarks or integrate different serialization benchmarks into your own tooling. Conclusion # In the sections above, we looked at how Flink performs serialization for different sorts of data types and elaborated the technical advantages and disadvantages. For data types used in Flink state, you probably want to leverage either POJO or Avro types which, currently, are the only ones supporting state evolution out of the box and allow your stateful application to develop over time. POJOs are usually faster in the de/serialization while Avro may support more flexible schema evolution and may integrate better with external systems. Please note, however, that you can use different serializers for external vs. internal components or even state vs. network communication.
 The fastest de/serialization is achieved with Flink’s internal tuple and row serializers which can access these types&rsquo; fields directly without going via reflection. With roughly 30% decreased throughput as compared to tuples, Protobuf and POJO types do not perform too badly on their own and are more flexible and maintainable. Avro (specific and generic) records as well as Thrift data types further reduce performance by 20% and 30%, respectively. You definitely want to avoid Kryo as that reduces throughput further by around 50% and more!
 The next article in this series will use this finding as a starting point to look into a few common pitfalls and obstacles of avoiding Kryo, how to get the most out of the PojoSerializer, and a few more tuning techniques with respect to serialization. Stay tuned for more.
-`}),e.add({id:185,href:"/2020/04/09/pyflink-introducing-python-support-for-udfs-in-flinks-table-api/",title:"PyFlink: Introducing Python Support for UDFs in Flink's Table API",section:"Flink Blog",content:`Flink 1.9 introduced the Python Table API, allowing developers and data engineers to write Python Table API jobs for Table transformations and analysis, such as Python ETL or aggregate jobs. However, Python users faced some limitations when it came to support for Python UDFs in Flink 1.9, preventing them from extending the system’s built-in functionality.
+`}),e.add({id:186,href:"/2020/04/09/pyflink-introducing-python-support-for-udfs-in-flinks-table-api/",title:"PyFlink: Introducing Python Support for UDFs in Flink's Table API",section:"Flink Blog",content:`Flink 1.9 introduced the Python Table API, allowing developers and data engineers to write Python Table API jobs for Table transformations and analysis, such as Python ETL or aggregate jobs. However, Python users faced some limitations when it came to support for Python UDFs in Flink 1.9, preventing them from extending the system’s built-in functionality.
 In Flink 1.10, the community further extended the support for Python by adding Python UDFs in PyFlink. Additionally, both the Python UDF environment and dependency management are now supported, allowing users to import third-party libraries in the UDFs, leveraging Python&rsquo;s rich set of third-party libraries.
 Python Support for UDFs in Flink 1.10 # Before diving into how you can define and use Python UDFs, we explain the motivation and background behind how UDFs work in PyFlink and provide some additional context about the implementation of our approach. Below we give a brief introduction on the PyFlink architecture from job submission, all the way to executing the Python UDF.
 The PyFlink architecture mainly includes two parts — local and cluster — as shown in the architecture visual below. The local phase is the compilation of the job, and the cluster is the execution of the job.
@@ -3603,7 +3613,7 @@
 @udf(input_types=[DataTypes.BIGINT(), DataTypes.BIGINT()], result_type=DataTypes.BIGINT()) def add(i, j): from mpmath import fadd # add third-party dependency return int(fadd(i, j)) To make it available on the worker node that does not contain the dependency, you can specify the dependencies with the following commands and API:
 $ cd /tmp $ echo mpmath==1.1.0 &gt; requirements.txt $ pip download -d cached_dir -r requirements.txt --no-binary :all: t_env.set_python_requirements(&#34;/tmp/requirements.txt&#34;, &#34;/tmp/cached_dir&#34;) A requirements.txt file that defines the third-party dependencies is used. If the dependencies cannot be accessed in the cluster, then you can specify a directory containing the installation packages of these dependencies by using the parameter &ldquo;requirements_cached_dir&rdquo;, as illustrated in the example above. The dependencies will be uploaded to the cluster and installed offline.
 Conclusion &amp; Upcoming work # In this blog post, we introduced the architecture of Python UDFs in PyFlink and provided some examples on how to define, register and invoke UDFs. Flink 1.10 brings Python support in the framework to new levels, allowing Python users to write even more magic with their preferred language. The community is actively working towards continuously improving the functionality and performance of PyFlink. Future work in upcoming releases will introduce support for Pandas UDFs in scalar and aggregate functions, add support to use Python UDFs through the SQL client to further expand the usage scope of Python UDFs, provide support for a Python ML Pipeline API and finally work towards even more performance improvements. The picture below provides more details on the roadmap for succeeding releases.
-`}),e.add({id:186,href:"/2020/04/07/stateful-functions-2.0-an-event-driven-database-on-apache-flink/",title:"Stateful Functions 2.0 - An Event-driven Database on Apache Flink",section:"Flink Blog",content:`Today, we are announcing the release of Stateful Functions (StateFun) 2.0 — the first release of Stateful Functions as part of the Apache Flink project. This release marks a big milestone: Stateful Functions 2.0 is not only an API update, but the first version of an event-driven database that is built on Apache Flink.
+`}),e.add({id:187,href:"/2020/04/07/stateful-functions-2.0-an-event-driven-database-on-apache-flink/",title:"Stateful Functions 2.0 - An Event-driven Database on Apache Flink",section:"Flink Blog",content:`Today, we are announcing the release of Stateful Functions (StateFun) 2.0 — the first release of Stateful Functions as part of the Apache Flink project. This release marks a big milestone: Stateful Functions 2.0 is not only an API update, but the first version of an event-driven database that is built on Apache Flink.
 Stateful Functions 2.0 makes it possible to combine StateFun’s powerful approach to state and composition with the elasticity, rapid scaling/scale-to-zero and rolling upgrade capabilities of FaaS implementations like AWS Lambda and modern resource orchestration frameworks like Kubernetes.
 With these features, Stateful Functions 2.0 addresses two of the most cited shortcomings of many FaaS setups today: consistent state and efficient messaging between functions.
 An Event-driven Database # When Stateful Functions joined Apache Flink at the beginning of this year, the project had started as a library on top of Flink to build general-purpose event-driven applications. Users would implement functions that receive and send messages, and maintain state in persistent variables. Flink provided the runtime with efficient exactly-once state and messaging. Stateful Functions 1.0 was a FaaS-inspired mix between stream processing and actor programming — on steroids.
@@ -3636,7 +3646,7 @@
 To follow the project and learn more, please check out these resources:
 Code: https://github.com/apache/flink-statefun Docs: //nightlies.apache.org/flink/flink-statefun-docs-release-2.0/ Apache Flink project site: https://flink.apache.org/ Apache Flink on Twitter: @ApacheFlink Stateful Functions Webpage: https://statefun.io Stateful Functions on Twitter: @StateFun_IO Thank you! # The Apache Flink community would like to thank all contributors that have made this release possible:
 David Anderson, Dian Fu, Igal Shilman, Seth Wiesman, Stephan Ewen, Tzu-Li (Gordon) Tai, hequn8128
-`}),e.add({id:187,href:"/2020/03/30/flink-community-update-april20/",title:"Flink Community Update - April'20",section:"Flink Blog",content:`While things slow down around us, the Apache Flink community is privileged to remain as active as ever. This blogpost combs through the past few months to give you an update on the state of things in Flink — from core releases to Stateful Functions; from some good old community stats to a new development blog.
+`}),e.add({id:188,href:"/2020/03/30/flink-community-update-april20/",title:"Flink Community Update - April'20",section:"Flink Blog",content:`While things slow down around us, the Apache Flink community is privileged to remain as active as ever. This blogpost combs through the past few months to give you an update on the state of things in Flink — from core releases to Stateful Functions; from some good old community stats to a new development blog.
 And since now it&rsquo;s more important than ever to keep up the spirits, we’d like to invite you to join the Flink Forward Virtual Conference, on April 22-24 (see Upcoming Events). Hope to see you there!
 The Year (so far) in Flink # Flink 1.10 Release # To kick off the new year, the Flink community released Flink 1.10 with the record contribution of over 200 engineers. This release introduced significant improvements to the overall performance and stability of Flink jobs, a preview of native Kubernetes integration and advances in Python support (PyFlink). Flink 1.10 also marked the completion of the Blink integration, hardening streaming SQL and bringing mature batch processing to Flink with production-ready Hive integration and TPC-DS coverage.
 The community is now discussing the release of Flink 1.10.1, covering some outstanding bugs from Flink 1.10.
@@ -3657,7 +3667,7 @@
 Upcoming Events # Flink Forward Virtual Conference # The organization of Flink Forward had to make the hard decision of cancelling this year’s event in San Francisco. But all is not lost! Flink Forward SF will be held online on April 22-24 and you can register (for free) here. Join the community for interactive talks and Q&amp;A sessions with core Flink contributors and companies like Splunk, Lyft, Netflix or Google.
 Others # Events across the globe have come to a halt due to the growing concerns around COVID-19, so this time we’ll leave you with some interesting content to read instead. In addition to this written content, you can also recap last year’s sessions from Flink Forward Berlin and Flink Forward China!
 Type Links Blogposts Replayable Process Functions: Time, Ordering, and Timers @Bird Application Log Intelligence & Performance Insights at Salesforce Using Flink @Salesforce State Unlocked: Interacting with State in Apache Flink Advanced Flink Application Patterns Vol.1: Case Study of a Fraud Detection System Advanced Flink Application Patterns Vol.2: Dynamic Updates of Application Logic Apache Beam: How Beam Runs on Top of Flink Flink as Unified Engine for Modern Data Warehousing: Production-Ready Hive Integration Tutorials Flink on Zeppelin — (Part 3). Streaming Streaming ETL with Apache Flink and Amazon Kinesis Data Analytics No Java Required: Configuring Sources and Sinks in SQL A Guide for Unit Testing in Apache Flink If you’d like to keep a closer eye on what’s happening in the community, subscribe to the Flink @community mailing list to get fine-grained weekly updates, upcoming event announcements and more.
-`}),e.add({id:188,href:"/2020/03/27/flink-as-unified-engine-for-modern-data-warehousing-production-ready-hive-integration/",title:"Flink as Unified Engine for Modern Data Warehousing: Production-Ready Hive Integration",section:"Flink Blog",content:`In this blog post, you will learn our motivation behind the Flink-Hive integration, and how Flink 1.10 can help modernize your data warehouse.
+`}),e.add({id:189,href:"/2020/03/27/flink-as-unified-engine-for-modern-data-warehousing-production-ready-hive-integration/",title:"Flink as Unified Engine for Modern Data Warehousing: Production-Ready Hive Integration",section:"Flink Blog",content:`In this blog post, you will learn our motivation behind the Flink-Hive integration, and how Flink 1.10 can help modernize your data warehouse.
 Introduction # What are some of the latest requirements for your data warehouse and data infrastructure in 2020?
 We’ve came up with some for you.
 Firstly, today’s business is shifting to a more real-time fashion, and thus demands abilities to process online streaming data with low latency for near-real-time or even real-time analytics. People become less and less tolerant of delays between when data is generated and when it arrives at their hands, ready to use. Hours or even days of delay is not acceptable anymore. Users are expecting minutes, or even seconds, of end-to-end latency for data in their warehouse, to get quicker-than-ever insights.
@@ -3688,7 +3698,7 @@
 Summary # Data warehousing is shifting to a more real-time fashion, and Apache Flink can make a difference for your organization in this space.
 Flink 1.10 brings production-ready Hive integration and empowers users to achieve more in both metadata management and unified/batch data processing.
 We encourage all our users to get their hands on Flink 1.10. You are very welcome to join the community in development, discussions, and all other kinds of collaborations in this topic.
-`}),e.add({id:189,href:"/2020/03/24/advanced-flink-application-patterns-vol.2-dynamic-updates-of-application-logic/",title:"Advanced Flink Application Patterns Vol.2: Dynamic Updates of Application Logic",section:"Flink Blog",content:`In the first article of the series, we gave a high-level description of the objectives and required functionality of a Fraud Detection engine. We also described how to make data partitioning in Apache Flink customizable based on modifiable rules instead of using a hardcoded KeysExtractor implementation.
+`}),e.add({id:190,href:"/2020/03/24/advanced-flink-application-patterns-vol.2-dynamic-updates-of-application-logic/",title:"Advanced Flink Application Patterns Vol.2: Dynamic Updates of Application Logic",section:"Flink Blog",content:`In the first article of the series, we gave a high-level description of the objectives and required functionality of a Fraud Detection engine. We also described how to make data partitioning in Apache Flink customizable based on modifiable rules instead of using a hardcoded KeysExtractor implementation.
 We intentionally omitted details of how the applied rules are initialized and what possibilities exist for updating them at runtime. In this post, we will address exactly these details. You will learn how the approach to data partitioning described in Part 1 can be applied in combination with a dynamic configuration. These two patterns, when used together, can eliminate the need to recompile the code and redeploy your Flink job for a wide range of modifications of the business logic.
 Rules Broadcasting # Let&rsquo;s first have a look at the previously-defined data-processing pipeline:
 DataStream&lt;Alert&gt; alerts = transactions .process(new DynamicKeyFunction()) .keyBy((keyed) -&gt; keyed.getKey()); .process(new DynamicAlertFunction()) DynamicKeyFunction provides dynamic data partitioning while DynamicAlertFunction is responsible for executing the main logic of processing transactions and sending alert messages according to defined rules.
@@ -3709,7 +3719,7 @@
 public class DynamicKeyFunction extends BroadcastProcessFunction&lt;Transaction, Rule, Keyed&lt;Transaction, String, Integer&gt;&gt; { @Override public void processBroadcastElement(Rule rule, Context ctx, Collector&lt;Keyed&lt;Transaction, String, Integer&gt;&gt; out) { BroadcastState&lt;Integer, Rule&gt; broadcastState = ctx.getBroadcastState(RULES_STATE_DESCRIPTOR); broadcastState.put(rule.getRuleId(), rule); } @Override public void processElement(Transaction event, ReadOnlyContext ctx, Collector&lt;Keyed&lt;Transaction, String, Integer&gt;&gt; out){ ReadOnlyBroadcastState&lt;Integer, Rule&gt; rulesState = ctx.getBroadcastState(RULES_STATE_DESCRIPTOR); for (Map.Entry&lt;Integer, Rule&gt; entry : rulesState.immutableEntries()) { final Rule rule = entry.getValue(); out.collect( new Keyed&lt;&gt;( event, KeysExtractor.getKey(rule.getGroupingKeyNames(), event), rule.getRuleId())); } } } In the above code, processElement() receives Transactions, and processBroadcastElement() receives Rule updates. When a new rule is created, it is distributed as depicted in Figure 6 and saved in all parallel instances of the operator using processBroadcastState. We use a Rule&rsquo;s ID as the key to store and reference individual rules. Instead of iterating over a hardcoded List&lt;Rules&gt;, we iterate over entries in the dynamically-updated broadcast state.
 DynamicAlertFunction follows the same logic with respect to storing the rules in the broadcast MapState. As described in Part 1, each message in the processElement input is intended to be processed by one specific rule and comes &ldquo;pre-marked&rdquo; with a corresponding ID by DynamicKeyFunction. All we need to do is retrieve the definition of the corresponding rule from BroadcastState by using the provided ID and process it according to the logic required by that rule. At this stage, we will also add messages to the internal function state in order to perform calculations on the required time window of data. We will consider how this is done in the final blog of the series about Fraud Detection.
 Summary # In this blog post, we continued our investigation of the use case of a Fraud Detection System built with Apache Flink. We looked into different ways in which data can be distributed between parallel operator instances and, most importantly, examined broadcast state. We demonstrated how dynamic partitioning — a pattern described in the first part of the series — can be combined and enhanced by the functionality provided by the broadcast state pattern. The ability to send dynamic updates at runtime is a powerful feature of Apache Flink that is applicable in a variety of other use cases, such as controlling state (cleanup/insert/fix), running A/B experiments or executing updates of ML model coefficients.
-`}),e.add({id:190,href:"/2020/02/22/apache-beam-how-beam-runs-on-top-of-flink/",title:"Apache Beam: How Beam Runs on Top of Flink",section:"Flink Blog",content:`Note: This blog post is based on the talk &ldquo;Beam on Flink: How Does It Actually Work?&rdquo;.
+`}),e.add({id:191,href:"/2020/02/22/apache-beam-how-beam-runs-on-top-of-flink/",title:"Apache Beam: How Beam Runs on Top of Flink",section:"Flink Blog",content:`Note: This blog post is based on the talk &ldquo;Beam on Flink: How Does It Actually Work?&rdquo;.
 Apache Flink and Apache Beam are open-source frameworks for parallel, distributed data processing at scale. Unlike Flink, Beam does not come with a full-blown execution engine of its own but plugs into other execution engines, such as Apache Flink, Apache Spark, or Google Cloud Dataflow. In this blog post we discuss the reasons to use Flink together with Beam for your batch and stream processing needs. We also take a closer look at how Beam works with Flink to provide an idea of the technical aspects of running Beam pipelines with Flink. We hope you find some useful information on how and why the two frameworks can be utilized in combination. For more information, you can refer to the corresponding documentation on the Beam website or contact the community through the Beam mailing list.
 What is Apache Beam # Apache Beam is an open-source, unified model for defining batch and streaming data-parallel processing pipelines. It is unified in the sense that you use a single API, in contrast to using a separate API for batch and streaming like it is the case in Flink. Beam was originally developed by Google which released it in 2014 as the Cloud Dataflow SDK. In 2016, it was donated to the Apache Software Foundation with the name of Beam. It has been developed by the open-source community ever since. With Apache Beam, developers can write data processing jobs, also known as pipelines, in multiple languages, e.g. Java, Python, Go, SQL. A pipeline is then executed by one of Beam’s Runners. A Runner is responsible for translating Beam pipelines such that they can run on an execution engine. Every supported execution engine has a Runner. The following Runners are available: Apache Flink, Apache Spark, Apache Samza, Hazelcast Jet, Google Cloud Dataflow, and others.
 The execution model, as well as the API of Apache Beam, are similar to Flink&rsquo;s. Both frameworks are inspired by the MapReduce, MillWheel, and Dataflow papers. Like Flink, Beam is designed for parallel, distributed data processing. Both have similar transformations, support for windowing, event/processing time, watermarks, timers, triggers, and much more. However, Beam not being a full runtime focuses on providing the framework for building portable, multi-language batch and stream processing pipelines such that they can be run across several execution engines. The idea is that you write your pipeline once and feed it with either batch or streaming data. When you run it, you just pick one of the supported backends to execute. A large integration test suite in Beam called &ldquo;ValidatesRunner&rdquo; ensures that the results will be the same, regardless of which backend you choose for the execution.
@@ -3739,7 +3749,7 @@
 Conclusion # Using Apache Beam with Apache Flink combines (a.) the power of Flink with (b.) the flexibility of Beam. All it takes to run Beam is a Flink cluster, which you may already have. Apache Beam&rsquo;s fully-fledged Python API is probably the most compelling argument for using Beam with Flink, but the unified API which allows to &ldquo;write-once&rdquo; and &ldquo;execute-anywhere&rdquo; is also very appealing to Beam users. On top of this, features like side inputs and a rich connector ecosystem are also reasons why people like Beam.
 With the introduction of schemas, a new format for handling type information, Beam is heading in a similar direction as Flink with its type system which is essential for the Table API or SQL. Speaking of, the next Flink release will include a Python version of the Table API which is based on the language portability of Beam. Looking ahead, the Beam community plans to extend the support for interactive programs like notebooks. TFX, which is built with Beam, is a very powerful way to solve many problems around training and validating machine learning models.
 For many years, Beam and Flink have inspired and learned from each other. With the Python support being based on Beam in Flink, they only seem to come closer to each other. That&rsquo;s all the better for the community, and also users have more options and functionality to choose from.
-`}),e.add({id:191,href:"/2020/02/20/no-java-required-configuring-sources-and-sinks-in-sql/",title:"No Java Required: Configuring Sources and Sinks in SQL",section:"Flink Blog",content:` Introduction # The recent Apache Flink 1.10 release includes many exciting features. In particular, it marks the end of the community&rsquo;s year-long effort to merge in the Blink SQL contribution from Alibaba. The reason the community chose to spend so much time on the contribution is that SQL works. It allows Flink to offer a truly unified interface over batch and streaming and makes stream processing accessible to a broad audience of developers and analysts. Best of all, Flink SQL is ANSI-SQL compliant, which means if you&rsquo;ve ever used a database in the past, you already know it1!
+`}),e.add({id:192,href:"/2020/02/20/no-java-required-configuring-sources-and-sinks-in-sql/",title:"No Java Required: Configuring Sources and Sinks in SQL",section:"Flink Blog",content:` Introduction # The recent Apache Flink 1.10 release includes many exciting features. In particular, it marks the end of the community&rsquo;s year-long effort to merge in the Blink SQL contribution from Alibaba. The reason the community chose to spend so much time on the contribution is that SQL works. It allows Flink to offer a truly unified interface over batch and streaming and makes stream processing accessible to a broad audience of developers and analysts. Best of all, Flink SQL is ANSI-SQL compliant, which means if you&rsquo;ve ever used a database in the past, you already know it1!
 A lot of work focused on improving runtime performance and progressively extending its coverage of the SQL standard. Flink now supports the full TPC-DS query set for batch queries, reflecting the readiness of its SQL engine to address the needs of modern data warehouse-like workloads. Its streaming SQL supports an almost equal set of features - those that are well defined on a streaming runtime - including complex joins and MATCH_RECOGNIZE.
 As important as this work is, the community also strives to make these features generally accessible to the broadest audience possible. That is why the Flink community is excited in 1.10 to offer production-ready DDL syntax (e.g., CREATE TABLE, DROP TABLE) and a refactored catalog interface.
 Accessing Your Data Where It Lives # Flink does not store data at rest; it is a compute engine and requires other systems to consume input from and write its output. Those that have used Flink&rsquo;s DataStream API in the past will be familiar with connectors that allow for interacting with external systems. Flink has a vast connector ecosystem that includes all major message queues, filesystems, and databases.
@@ -3752,7 +3762,7 @@
 The most notable catalog that Flink integrates with today is Hive Metastore. The Hive catalog allows Flink to fully interoperate with Hive and serve as a more efficient query engine. Flink supports reading and writing Hive tables, using Hive UDFs, and even leveraging Hive&rsquo;s metastore catalog to persist Flink specific metadata.
 Looking Ahead # Flink SQL has made enormous strides to democratize stream processing, and 1.10 marks a significant milestone in that development. However, we are not ones to rest on our laurels and, the community is committed to raising the bar on standards while lowering the barriers to entry. The community is looking to add more catalogs, such as JDBC and Apache Pulsar. We encourage you to sign up for the mailing list and stay on top of the announcements and new features in upcoming releases.
 My colleague Timo, whose worked on Flink SQL from the beginning, has the entire SQL standard printed on his desk and references it before any changes are merged. It&rsquo;s enormous.&#160;&#x21a9;&#xfe0e;
-`}),e.add({id:192,href:"/2020/02/11/apache-flink-1.10.0-release-announcement/",title:"Apache Flink 1.10.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to hit the double digits and announce the release of Flink 1.10.0! As a result of the biggest community effort to date, with over 1.2k issues implemented and more than 200 contributors, this release introduces significant improvements to the overall performance and stability of Flink jobs, a preview of native Kubernetes integration and great advances in Python support (PyFlink).
+`}),e.add({id:193,href:"/2020/02/11/apache-flink-1.10.0-release-announcement/",title:"Apache Flink 1.10.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is excited to hit the double digits and announce the release of Flink 1.10.0! As a result of the biggest community effort to date, with over 1.2k issues implemented and more than 200 contributors, this release introduces significant improvements to the overall performance and stability of Flink jobs, a preview of native Kubernetes integration and great advances in Python support (PyFlink).
 Flink 1.10 also marks the completion of the Blink integration, hardening streaming SQL and bringing mature batch processing to Flink with production-ready Hive integration and TPC-DS coverage. This blog post describes all major new features and improvements, important changes to be aware of and what to expect moving forward.
 The binary distribution and source artifacts are now available on the updated Downloads page of the Flink website. For more details, check the complete release changelog and the updated documentation. We encourage you to download the release and share your feedback with the community through the Flink mailing lists or JIRA.
 New Features and Improvements # Improved Memory Management and Configuration # The current TaskExecutor memory configuration in Flink has some shortcomings that make it hard to reason about or optimize resource utilization, such as:
@@ -3804,7 +3814,7 @@
 Release Notes # Please review the release notes carefully for a detailed list of changes and new features if you plan to upgrade your setup to Flink 1.10. This version is API-compatible with previous 1.x releases for APIs annotated with the @Public annotation.
 List of Contributors # The Apache Flink community would like to thank all contributors that have made this release possible:
 Achyuth Samudrala, Aitozi, Alberto Romero, Alec.Ch, Aleksey Pak, Alexander Fedulov, Alice Yan, Aljoscha Krettek, Aloys, Andrey Zagrebin, Arvid Heise, Benchao Li, Benoit Hanotte, Benoît Paris, Bhagavan Das, Biao Liu, Chesnay Schepler, Congxian Qiu, Cyrille Chépélov, César Soto Valero, David Anderson, David Hrbacek, David Moravek, Dawid Wysakowicz, Dezhi Cai, Dian Fu, Dyana Rose, Eamon Taaffe, Fabian Hueske, Fawad Halim, Fokko Driesprong, Frey Gao, Gabor Gevay, Gao Yun, Gary Yao, GatsbyNewton, GitHub, Grebennikov Roman, GuoWei Ma, Gyula Fora, Haibo Sun, Hao Dang, Henvealf, Hongtao Zhang, HuangXingBo, Hwanju Kim, Igal Shilman, Jacob Sevart, Jark Wu, Jeff Martin, Jeff Yang, Jeff Zhang, Jiangjie (Becket) Qin, Jiayi, Jiayi Liao, Jincheng Sun, Jing Zhang, Jingsong Lee, JingsongLi, Joao Boto, John Lonergan, Kaibo Zhou, Konstantin Knauf, Kostas Kloudas, Kurt Young, Leonard Xu, Ling Wang, Lining Jing, Liupengcheng, LouisXu, Mads Chr. Olesen, Marco Zühlke, Marcos Klein, Matyas Orhidi, Maximilian Bode, Maximilian Michels, Nick Pavlakis, Nico Kruber, Nicolas Deslandes, Pablo Valtuille, Paul Lam, Paul Lin, PengFei Li, Piotr Nowojski, Piotr Przybylski, Piyush Narang, Ricco Chen, Richard Deurwaarder, Robert Metzger, Roman, Roman Grebennikov, Roman Khachatryan, Rong Rong, Rui Li, Ryan Tao, Scott Kidder, Seth Wiesman, Shannon Carey, Shaobin.Ou, Shuo Cheng, Stefan Richter, Stephan Ewen, Steve OU, Steven Wu, Terry Wang, Thesharing, Thomas Weise, Till Rohrmann, Timo Walther, Tony Wei, TsReaper, Tzu-Li (Gordon) Tai, Victor Wong, WangHengwei, Wei Zhong, WeiZhong94, Wind (Jiayi Liao), Xintong Song, XuQianJin-Stars, Xuefu Zhang, Xupingyong, Yadong Xie, Yang Wang, Yangze Guo, Yikun Jiang, Ying, YngwieWang, Yu Li, Yuan Mei, Yun Gao, Yun Tang, Zhanchun Zhang, Zhenghua Gao, Zhijiang, Zhu Zhu, a-suiniaev, azagrebin, beyond1920, biao.liub, blueszheng, bowen.li, caoyingjie, catkint, chendonglin, chenqi, chunpinghe, cyq89051127, danrtsey.wy, dengziming, dianfu, eskabetxe, fanrui, forideal, 
gentlewang, godfrey he, godfreyhe, haodang, hehuiyuan, hequn8128, hpeter, huangxingbo, huzheng, ifndef-SleePy, jiemotongxue, joe, jrthe42, kevin.cyj, klion26, lamber-ken, libenchao, liketic, lincoln-lil, lining, liuyongvs, liyafan82, lz, mans2singh, mojo, openinx, ouyangwulin, shining-huang, shuai-xu, shuo.cs, stayhsfLee, sunhaibotb, sunjincheng121, tianboxiu, tianchen, tianchen92, tison, tszkitlo40, unknown, vinoyang, vthinkxie, wangpeibin, wangxiaowei, wangxiyuan, wangxlong, wangyang0918, whlwanghailong, xuchao0903, xuyang1706, yanghua, yangjf2019, yongqiang chai, yuzhao.cyz, zentol, zhangzhanchum, zhengcanbin, zhijiang, zhongyong jin, zhuzhu.zz, zjuwangg, zoudaokoulife, 砚田, 谢磊, 张志豪, 曹建华
-`}),e.add({id:193,href:"/2020/02/03/a-guide-for-unit-testing-in-apache-flink/",title:"A Guide for Unit Testing in Apache Flink",section:"Flink Blog",content:`Writing unit tests is one of the essential tasks of designing a production-grade application. Without tests, a single change in code can result in cascades of failure in production. Thus unit tests should be written for all types of applications, be it a simple job cleaning data and training a model or a complex multi-tenant, real-time data processing system. In the following sections, we provide a guide for unit testing of Apache Flink applications. Apache Flink provides a robust unit testing framework to make sure your applications behave in production as expected during development. You need to include the following dependencies to utilize the provided framework.
+`}),e.add({id:194,href:"/2020/02/03/a-guide-for-unit-testing-in-apache-flink/",title:"A Guide for Unit Testing in Apache Flink",section:"Flink Blog",content:`Writing unit tests is one of the essential tasks of designing a production-grade application. Without tests, a single change in code can result in cascades of failure in production. Thus unit tests should be written for all types of applications, be it a simple job cleaning data and training a model or a complex multi-tenant, real-time data processing system. In the following sections, we provide a guide for unit testing of Apache Flink applications. Apache Flink provides a robust unit testing framework to make sure your applications behave in production as expected during development. You need to include the following dependencies to utilize the provided framework.
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-test-utils_\${scala.binary.version}&lt;/artifactId&gt; &lt;version&gt;\${flink.version}&lt;/version&gt; &lt;scope&gt;test&lt;/scope&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-runtime_\${scala.binary.version}&lt;/artifactId&gt; &lt;version&gt;\${flink.version}&lt;/version&gt; &lt;scope&gt;test&lt;/scope&gt; &lt;classifier&gt;tests&lt;/classifier&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_\${scala.binary.version}&lt;/artifactId&gt; &lt;version&gt;\${flink.version}&lt;/version&gt; &lt;scope&gt;test&lt;/scope&gt; &lt;classifier&gt;tests&lt;/classifier&gt; &lt;/dependency&gt; The strategy of writing unit tests differs for various operators. You can break down the strategy into the following three buckets:
 Stateless Operators Stateful Operators Timed Process Operators Stateless Operators # Writing unit tests for a stateless operator is a breeze. You need to follow the basic norm of writing a test case, i.e., create an instance of the function class and test the appropriate methods. Let’s take an example of a simple Map operator.
 public class MyStatelessMap implements MapFunction&lt;String, String&gt; { @Override public String map(String in) throws Exception { String out = &#34;hello &#34; + in; return out; } } The test case for the above operator should look like
@@ -3822,13 +3832,13 @@
 Let’s take a look at the test case
 @Test public void testProcessElement() throws Exception{ MyProcessFunction myProcessFunction = new MyProcessFunction(); OneInputStreamOperatorTestHarness&lt;String, String&gt; testHarness = new KeyedOneInputStreamOperatorTestHarness&lt;&gt;( new KeyedProcessOperator&lt;&gt;(myProcessFunction), x -&gt; &#34;1&#34;, Types.STRING); // Function time is initialized to 0 testHarness.open(); testHarness.processElement(&#34;world&#34;, 10); Assert.assertEquals( Lists.newArrayList(new StreamRecord&lt;&gt;(&#34;hello world&#34;, 10)), testHarness.extractOutputStreamRecords()); } @Test public void testOnTimer() throws Exception { MyProcessFunction myProcessFunction = new MyProcessFunction(); OneInputStreamOperatorTestHarness&lt;String, String&gt; testHarness = new KeyedOneInputStreamOperatorTestHarness&lt;&gt;( new KeyedProcessOperator&lt;&gt;(myProcessFunction), x -&gt; &#34;1&#34;, Types.STRING); testHarness.open(); testHarness.processElement(&#34;world&#34;, 10); Assert.assertEquals(1, testHarness.numProcessingTimeTimers()); // Function time is set to 50 testHarness.setProcessingTime(50); Assert.assertEquals( Lists.newArrayList( new StreamRecord&lt;&gt;(&#34;hello world&#34;, 10), new StreamRecord&lt;&gt;(&#34;Timer triggered at timestamp 50&#34;)), testHarness.extractOutputStreamRecords()); } The mechanism to test the multi-input stream operators such as CoProcess functions is similar to the ones described in this article. You should use the TwoInput variant of the harness for these operators, such as TwoInputStreamOperatorTestHarness.
 Summary # In the previous sections we showcased how unit testing in Apache Flink works for stateless, stateful and times-aware-operators. We hope you found the steps easy to follow and execute while developing your Flink applications. If you have any questions or feedback you can reach out to me here or contact the community on the Apache Flink user mailing list.
-`}),e.add({id:194,href:"/2020/01/30/apache-flink-1.9.2-released/",title:"Apache Flink 1.9.2 Released",section:"Flink Blog",content:`The Apache Flink community released the second bugfix version of the Apache Flink 1.9 series.
+`}),e.add({id:195,href:"/2020/01/30/apache-flink-1.9.2-released/",title:"Apache Flink 1.9.2 Released",section:"Flink Blog",content:`The Apache Flink community released the second bugfix version of the Apache Flink 1.9 series.
 This release includes 117 fixes and minor improvements for Flink 1.9.1. The list below includes a detailed list of all fixes and improvements.
 We highly recommend all users to upgrade to Flink 1.9.2.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.9.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.9.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.9.2&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-12122] - Spread out tasks evenly across all available registered TaskManagers [FLINK-13360] - Add documentation for HBase connector for Table API &amp; SQL [FLINK-13361] - Add documentation for JDBC connector for Table API &amp; SQL [FLINK-13723] - Use liquid-c for faster doc generation [FLINK-13724] - Remove unnecessary whitespace from the docs&#39; sidenav [FLINK-13725] - Use sassc for faster doc generation [FLINK-13726] - Build docs with jekyll 4.0.0.pre.beta1 [FLINK-13791] - Speed up sidenav by using group_by [FLINK-13817] - Expose whether web submissions are enabled [FLINK-13818] - Check whether web submission are enabled [FLINK-14535] - Cast exception is thrown when count distinct on decimal fields [FLINK-14735] - Improve batch schedule check input consumable performance Bug [FLINK-10377] - Remove precondition in TwoPhaseCommitSinkFunction.notifyCheckpointComplete [FLINK-10435] - Client sporadically hangs after Ctrl + C [FLINK-11120] - TIMESTAMPADD function handles TIME incorrectly [FLINK-11835] - ZooKeeperLeaderElectionITCase.testJobExecutionOnClusterWithLeaderChange failed [FLINK-12342] - Yarn Resource Manager Acquires Too Many Containers [FLINK-12399] - FilterableTableSource does not use filters on job run [FLINK-13184] - Starting a TaskExecutor blocks the YarnResourceManager&#39;s main thread [FLINK-13589] - DelimitedInputFormat index error on multi-byte delimiters with whole file input splits [FLINK-13702] - BaseMapSerializerTest.testDuplicate fails on Travis [FLINK-13708] - Transformations should be cleared because a table environment could execute multiple job [FLINK-13740] - TableAggregateITCase.testNonkeyedFlatAggregate failed on Travis [FLINK-13749] - Make Flink client respect classloading policy [FLINK-13758] - Failed to submit JobGraph when registered hdfs file in DistributedCache [FLINK-13799] - Web Job Submit Page displays stream of error message when web submit is disables in the config [FLINK-13827] - Shell variable should be 
escaped in start-scala-shell.sh [FLINK-13862] - Update Execution Plan docs [FLINK-13945] - Instructions for building flink-shaded against vendor repository don&#39;t work [FLINK-13969] - Resuming Externalized Checkpoint (rocks, incremental, scale down) end-to-end test fails on Travis [FLINK-13995] - Fix shading of the licence information of netty [FLINK-13999] - Correct the documentation of MATCH_RECOGNIZE [FLINK-14066] - Pyflink building failure in master and 1.9.0 version [FLINK-14074] - MesosResourceManager can&#39;t create new taskmanagers in Session Cluster Mode. [FLINK-14175] - Upgrade KPL version in flink-connector-kinesis to fix application OOM [FLINK-14200] - Temporal Table Function Joins do not work on Tables (only TableSources) on the query side [FLINK-14235] - Kafka010ProducerITCase&gt;KafkaProducerTestBase.testOneToOneAtLeastOnceCustomOperator fails on travis [FLINK-14315] - NPE with JobMaster.disconnectTaskManager [FLINK-14337] - HistoryServer does not handle NPE on corruped archives properly [FLINK-14347] - YARNSessionFIFOITCase.checkForProhibitedLogContents found a log with prohibited string [FLINK-14355] - Example code in state processor API docs doesn&#39;t compile [FLINK-14370] - KafkaProducerAtLeastOnceITCase&gt;KafkaProducerTestBase.testOneToOneAtLeastOnceRegularSink fails on Travis [FLINK-14382] - Incorrect handling of FLINK_PLUGINS_DIR on Yarn [FLINK-14398] - Further split input unboxing code into separate methods [FLINK-14413] - Shade-plugin ApacheNoticeResourceTransformer uses platform-dependent encoding [FLINK-14434] - Dispatcher#createJobManagerRunner should not start JobManagerRunner [FLINK-14445] - Python module build failed when making sdist [FLINK-14447] - Network metrics doc table render confusion [FLINK-14459] - Python module build hangs [FLINK-14524] - PostgreSQL JDBC sink generates invalid SQL in upsert mode [FLINK-14547] - UDF cannot be in the join condition in blink planner [FLINK-14561] - Don&#39;t write FLINK_PLUGINS_DIR ENV 
variable to Flink configuration [FLINK-14562] - RMQSource leaves idle consumer after closing [FLINK-14574] - flink-s3-fs-hadoop doesn&#39;t work with plugins mechanism [FLINK-14589] - Redundant slot requests with the same AllocationID leads to inconsistent slot table [FLINK-14641] - Fix description of metric \`fullRestarts\` [FLINK-14673] - Shouldn&#39;t expect HMS client to throw NoSuchObjectException for non-existing function [FLINK-14683] - RemoteStreamEnvironment&#39;s construction function has a wrong method [FLINK-14701] - Slot leaks if SharedSlotOversubscribedException happens [FLINK-14784] - CsvTableSink miss delimiter when row start with null member [FLINK-14817] - &quot;Streaming Aggregation&quot; document contains misleading code examples [FLINK-14846] - Correct the default writerbuffer size documentation of RocksDB [FLINK-14910] - DisableAutoGeneratedUIDs fails on keyBy [FLINK-14930] - OSS Filesystem Uses Wrong Shading Prefix [FLINK-14949] - Task cancellation can be stuck against out-of-thread error [FLINK-14951] - State TTL backend end-to-end test fail when taskManager has multiple slot [FLINK-14953] - Parquet table source should use schema type to build FilterPredicate [FLINK-14960] - Dependency shading of table modules test fails on Travis [FLINK-14976] - Cassandra Connector leaks Semaphore on Throwable; hangs on close [FLINK-15001] - The digest of sub-plan reuse should contain retraction traits for stream physical nodes [FLINK-15013] - Flink (on YARN) sometimes needs too many slots [FLINK-15030] - Potential deadlock for bounded blocking ResultPartition. 
[FLINK-15036] - Container startup error will be handled out side of the YarnResourceManager&#39;s main thread [FLINK-15063] - Input group and output group of the task metric are reversed [FLINK-15065] - RocksDB configurable options doc description error [FLINK-15076] - Source thread should be interrupted during the Task cancellation [FLINK-15234] - Hive table created from flink catalog table shouldn&#39;t have null properties in parameters [FLINK-15240] - is_generic key is missing for Flink table stored in HiveCatalog [FLINK-15259] - HiveInspector.toInspectors() should convert Flink constant to Hive constant [FLINK-15266] - NPE in blink planner code gen [FLINK-15361] - ParquetTableSource should pass predicate in projectFields [FLINK-15412] - LocalExecutorITCase#testParameterizedTypes failed in travis [FLINK-15413] - ScalarOperatorsTest failed in travis [FLINK-15418] - StreamExecMatchRule not set FlinkRelDistribution [FLINK-15421] - GroupAggsHandler throws java.time.LocalDateTime cannot be cast to java.sql.Timestamp [FLINK-15435] - ExecutionConfigTests.test_equals_and_hash in pyFlink fails when cpu core numbers is 6 [FLINK-15443] - Use JDBC connector write FLOAT value occur ClassCastException [FLINK-15478] - FROM_BASE64 code gen type wrong [FLINK-15489] - WebUI log refresh not working [FLINK-15522] - Misleading root cause exception when cancelling the job [FLINK-15523] - ConfigConstants generally excluded from japicmp [FLINK-15543] - Apache Camel not bundled but listed in flink-dist NOTICE [FLINK-15549] - Integer overflow in SpillingResettableMutableObjectIterator [FLINK-15577] - WindowAggregate RelNodes missing Window specs in digest [FLINK-15615] - Docs: wrong guarantees stated for the file sink Improvement [FLINK-11135] - Reorder Hadoop config loading in HadoopUtils [FLINK-12848] - Method equals() in RowTypeInfo should consider fieldsNames [FLINK-13729] - Update website generation dependencies [FLINK-14008] - Auto-generate binary licensing [FLINK-14104] - Bump 
Jackson to 2.10.1 [FLINK-14123] - Lower the default value of taskmanager.memory.fraction [FLINK-14206] - Let fullRestart metric count fine grained restarts as well [FLINK-14215] - Add Docs for TM and JM Environment Variable Setting [FLINK-14251] - Add FutureUtils#forward utility [FLINK-14334] - ElasticSearch docs refer to non-existent ExceptionUtils.containsThrowable [FLINK-14335] - ExampleIntegrationTest in testing docs is incorrect [FLINK-14408] - In OldPlanner, UDF open method can not be invoke when SQL is optimized [FLINK-14557] - Clean up the package of py4j [FLINK-14639] - Metrics User Scope docs refer to wrong class [FLINK-14646] - Check non-null for key in KeyGroupStreamPartitioner [FLINK-14825] - Rework state processor api documentation [FLINK-14995] - Kinesis NOTICE is incorrect [FLINK-15113] - fs.azure.account.key not hidden from global configuration [FLINK-15554] - Bump jetty-util-ajax to 9.3.24 [FLINK-15657] - Fix the python table api doc link in Python API tutorial [FLINK-15700] - Improve Python API Tutorial doc [FLINK-15726] - Fixing error message in StreamExecTableSourceScan `}),e.add({id:195,href:"/2020/01/29/state-unlocked-interacting-with-state-in-apache-flink/",title:"State Unlocked: Interacting with State in Apache Flink",section:"Flink Blog",content:` Introduction # With stateful stream-processing becoming the norm for complex event-driven applications and real-time analytics, Apache Flink is often the backbone for running business logic and managing an organization’s most valuable asset — its data — as application state in Flink.
+Sub-task [FLINK-12122] - Spread out tasks evenly across all available registered TaskManagers [FLINK-13360] - Add documentation for HBase connector for Table API &amp; SQL [FLINK-13361] - Add documentation for JDBC connector for Table API &amp; SQL [FLINK-13723] - Use liquid-c for faster doc generation [FLINK-13724] - Remove unnecessary whitespace from the docs&#39; sidenav [FLINK-13725] - Use sassc for faster doc generation [FLINK-13726] - Build docs with jekyll 4.0.0.pre.beta1 [FLINK-13791] - Speed up sidenav by using group_by [FLINK-13817] - Expose whether web submissions are enabled [FLINK-13818] - Check whether web submission are enabled [FLINK-14535] - Cast exception is thrown when count distinct on decimal fields [FLINK-14735] - Improve batch schedule check input consumable performance Bug [FLINK-10377] - Remove precondition in TwoPhaseCommitSinkFunction.notifyCheckpointComplete [FLINK-10435] - Client sporadically hangs after Ctrl + C [FLINK-11120] - TIMESTAMPADD function handles TIME incorrectly [FLINK-11835] - ZooKeeperLeaderElectionITCase.testJobExecutionOnClusterWithLeaderChange failed [FLINK-12342] - Yarn Resource Manager Acquires Too Many Containers [FLINK-12399] - FilterableTableSource does not use filters on job run [FLINK-13184] - Starting a TaskExecutor blocks the YarnResourceManager&#39;s main thread [FLINK-13589] - DelimitedInputFormat index error on multi-byte delimiters with whole file input splits [FLINK-13702] - BaseMapSerializerTest.testDuplicate fails on Travis [FLINK-13708] - Transformations should be cleared because a table environment could execute multiple job [FLINK-13740] - TableAggregateITCase.testNonkeyedFlatAggregate failed on Travis [FLINK-13749] - Make Flink client respect classloading policy [FLINK-13758] - Failed to submit JobGraph when registered hdfs file in DistributedCache [FLINK-13799] - Web Job Submit Page displays stream of error message when web submit is disables in the config [FLINK-13827] - Shell variable should be 
escaped in start-scala-shell.sh [FLINK-13862] - Update Execution Plan docs [FLINK-13945] - Instructions for building flink-shaded against vendor repository don&#39;t work [FLINK-13969] - Resuming Externalized Checkpoint (rocks, incremental, scale down) end-to-end test fails on Travis [FLINK-13995] - Fix shading of the licence information of netty [FLINK-13999] - Correct the documentation of MATCH_RECOGNIZE [FLINK-14066] - Pyflink building failure in master and 1.9.0 version [FLINK-14074] - MesosResourceManager can&#39;t create new taskmanagers in Session Cluster Mode. [FLINK-14175] - Upgrade KPL version in flink-connector-kinesis to fix application OOM [FLINK-14200] - Temporal Table Function Joins do not work on Tables (only TableSources) on the query side [FLINK-14235] - Kafka010ProducerITCase&gt;KafkaProducerTestBase.testOneToOneAtLeastOnceCustomOperator fails on travis [FLINK-14315] - NPE with JobMaster.disconnectTaskManager [FLINK-14337] - HistoryServer does not handle NPE on corruped archives properly [FLINK-14347] - YARNSessionFIFOITCase.checkForProhibitedLogContents found a log with prohibited string [FLINK-14355] - Example code in state processor API docs doesn&#39;t compile [FLINK-14370] - KafkaProducerAtLeastOnceITCase&gt;KafkaProducerTestBase.testOneToOneAtLeastOnceRegularSink fails on Travis [FLINK-14382] - Incorrect handling of FLINK_PLUGINS_DIR on Yarn [FLINK-14398] - Further split input unboxing code into separate methods [FLINK-14413] - Shade-plugin ApacheNoticeResourceTransformer uses platform-dependent encoding [FLINK-14434] - Dispatcher#createJobManagerRunner should not start JobManagerRunner [FLINK-14445] - Python module build failed when making sdist [FLINK-14447] - Network metrics doc table render confusion [FLINK-14459] - Python module build hangs [FLINK-14524] - PostgreSQL JDBC sink generates invalid SQL in upsert mode [FLINK-14547] - UDF cannot be in the join condition in blink planner [FLINK-14561] - Don&#39;t write FLINK_PLUGINS_DIR ENV 
variable to Flink configuration [FLINK-14562] - RMQSource leaves idle consumer after closing [FLINK-14574] - flink-s3-fs-hadoop doesn&#39;t work with plugins mechanism [FLINK-14589] - Redundant slot requests with the same AllocationID leads to inconsistent slot table [FLINK-14641] - Fix description of metric \`fullRestarts\` [FLINK-14673] - Shouldn&#39;t expect HMS client to throw NoSuchObjectException for non-existing function [FLINK-14683] - RemoteStreamEnvironment&#39;s construction function has a wrong method [FLINK-14701] - Slot leaks if SharedSlotOversubscribedException happens [FLINK-14784] - CsvTableSink miss delimiter when row start with null member [FLINK-14817] - &quot;Streaming Aggregation&quot; document contains misleading code examples [FLINK-14846] - Correct the default writerbuffer size documentation of RocksDB [FLINK-14910] - DisableAutoGeneratedUIDs fails on keyBy [FLINK-14930] - OSS Filesystem Uses Wrong Shading Prefix [FLINK-14949] - Task cancellation can be stuck against out-of-thread error [FLINK-14951] - State TTL backend end-to-end test fail when taskManager has multiple slot [FLINK-14953] - Parquet table source should use schema type to build FilterPredicate [FLINK-14960] - Dependency shading of table modules test fails on Travis [FLINK-14976] - Cassandra Connector leaks Semaphore on Throwable; hangs on close [FLINK-15001] - The digest of sub-plan reuse should contain retraction traits for stream physical nodes [FLINK-15013] - Flink (on YARN) sometimes needs too many slots [FLINK-15030] - Potential deadlock for bounded blocking ResultPartition. 
[FLINK-15036] - Container startup error will be handled out side of the YarnResourceManager&#39;s main thread [FLINK-15063] - Input group and output group of the task metric are reversed [FLINK-15065] - RocksDB configurable options doc description error [FLINK-15076] - Source thread should be interrupted during the Task cancellation [FLINK-15234] - Hive table created from flink catalog table shouldn&#39;t have null properties in parameters [FLINK-15240] - is_generic key is missing for Flink table stored in HiveCatalog [FLINK-15259] - HiveInspector.toInspectors() should convert Flink constant to Hive constant [FLINK-15266] - NPE in blink planner code gen [FLINK-15361] - ParquetTableSource should pass predicate in projectFields [FLINK-15412] - LocalExecutorITCase#testParameterizedTypes failed in travis [FLINK-15413] - ScalarOperatorsTest failed in travis [FLINK-15418] - StreamExecMatchRule not set FlinkRelDistribution [FLINK-15421] - GroupAggsHandler throws java.time.LocalDateTime cannot be cast to java.sql.Timestamp [FLINK-15435] - ExecutionConfigTests.test_equals_and_hash in pyFlink fails when cpu core numbers is 6 [FLINK-15443] - Use JDBC connector write FLOAT value occur ClassCastException [FLINK-15478] - FROM_BASE64 code gen type wrong [FLINK-15489] - WebUI log refresh not working [FLINK-15522] - Misleading root cause exception when cancelling the job [FLINK-15523] - ConfigConstants generally excluded from japicmp [FLINK-15543] - Apache Camel not bundled but listed in flink-dist NOTICE [FLINK-15549] - Integer overflow in SpillingResettableMutableObjectIterator [FLINK-15577] - WindowAggregate RelNodes missing Window specs in digest [FLINK-15615] - Docs: wrong guarantees stated for the file sink Improvement [FLINK-11135] - Reorder Hadoop config loading in HadoopUtils [FLINK-12848] - Method equals() in RowTypeInfo should consider fieldsNames [FLINK-13729] - Update website generation dependencies [FLINK-14008] - Auto-generate binary licensing [FLINK-14104] - Bump 
Jackson to 2.10.1 [FLINK-14123] - Lower the default value of taskmanager.memory.fraction [FLINK-14206] - Let fullRestart metric count fine grained restarts as well [FLINK-14215] - Add Docs for TM and JM Environment Variable Setting [FLINK-14251] - Add FutureUtils#forward utility [FLINK-14334] - ElasticSearch docs refer to non-existent ExceptionUtils.containsThrowable [FLINK-14335] - ExampleIntegrationTest in testing docs is incorrect [FLINK-14408] - In OldPlanner, UDF open method can not be invoke when SQL is optimized [FLINK-14557] - Clean up the package of py4j [FLINK-14639] - Metrics User Scope docs refer to wrong class [FLINK-14646] - Check non-null for key in KeyGroupStreamPartitioner [FLINK-14825] - Rework state processor api documentation [FLINK-14995] - Kinesis NOTICE is incorrect [FLINK-15113] - fs.azure.account.key not hidden from global configuration [FLINK-15554] - Bump jetty-util-ajax to 9.3.24 [FLINK-15657] - Fix the python table api doc link in Python API tutorial [FLINK-15700] - Improve Python API Tutorial doc [FLINK-15726] - Fixing error message in StreamExecTableSourceScan `}),e.add({id:196,href:"/2020/01/29/state-unlocked-interacting-with-state-in-apache-flink/",title:"State Unlocked: Interacting with State in Apache Flink",section:"Flink Blog",content:` Introduction # With stateful stream-processing becoming the norm for complex event-driven applications and real-time analytics, Apache Flink is often the backbone for running business logic and managing an organization’s most valuable asset — its data — as application state in Flink.
 In order to provide a state-of-the-art experience to Flink developers, the Apache Flink community makes significant efforts to provide the safety and future-proof guarantees organizations need while managing state in Flink. In particular, Flink developers should have sufficient means to access and modify their state, as well as making bootstrapping state with existing data from external systems a piece-of-cake. These efforts span multiple Flink major releases and consist of the following:
 Evolvable state schema in Apache Flink Flexibility in swapping state backends, and The State processor API, an offline tool to read, write and modify state in Flink This post discusses the community’s efforts related to state management in Flink, provides some practical examples of how the different features and APIs can be utilized and covers some future ideas for new and improved ways of managing state in Apache Flink.
 Stream processing: What is State? # To set the tone for the remaining of the post, let us first try to explain the very definition of state in stream processing. When it comes to stateful stream processing, state comprises of the information that an application or stream processing engine will remember across events and streams as more realtime (unbounded) and/or offline (bounded) data flow through the system. Most trivial applications are inherently stateful; even the example of a simple COUNT operation, whereby when counting up to 10, you essentially need to remember that you have already counted up to 9.
@@ -3861,7 +3871,7 @@
 Beyond widening the scope of the State Processor API, the Flink community is discussing a few additional ways to improve the way developers interact with state in Flink. One of them is the proposal for a Unified Savepoint Format (FLIP-41) for all keyed state backends. Such improvement aims at introducing a unified binary format across all savepoints in all keyed state backends, something that drastically reduces the overhead of swapping the state backend in a Flink application. Such an improvement would allow developers to take a savepoint in their application and restart it in a different state backend — for example, moving it from the heap to disk (RocksDB state backend) and back — depending on the scalability and evolution of the application at different points-in-time.
 The community is also discussing the ability to have upgradability dry runs in upcoming Flink releases. Having such functionality in Flink allows developers to detect incompatible updates offline without the need of starting a new Flink job from scratch. For example, Flink users will be able to uncover topology or schema incompatibilities upon upgrading a Flink job, without having to load the state back to a running Flink job in the first place. Additionally, with upgradability dry runs Flink users will be able to get information about the registered state through the streaming graph, without needing to access the state in the state backend.
 With all the exciting new functionality added in Flink 1.9 as well as some solid ideas and discussions around bringing state in Flink to the next level, the community is committed to making state in Apache Flink a fundamental element of the framework, something that is ever-present across versions and upgrades of your application and a component that is a true first-class citizen in Apache Flink. We encourage you to sign up to the mailing list and stay on top of the announcements and new features in upcoming releases.
-`}),e.add({id:196,href:"/2020/01/15/advanced-flink-application-patterns-vol.1-case-study-of-a-fraud-detection-system/",title:"Advanced Flink Application Patterns Vol.1: Case Study of a Fraud Detection System",section:"Flink Blog",content:`In this series of blog posts you will learn about three powerful Flink patterns for building streaming applications:
+`}),e.add({id:197,href:"/2020/01/15/advanced-flink-application-patterns-vol.1-case-study-of-a-fraud-detection-system/",title:"Advanced Flink Application Patterns Vol.1: Case Study of a Fraud Detection System",section:"Flink Blog",content:`In this series of blog posts you will learn about three powerful Flink patterns for building streaming applications:
 Dynamic updates of application logic Dynamic data partitioning (shuffle), controlled at runtime Low latency alerting based on custom windowing logic (without using the window API) These patterns expand the possibilities of what is achievable with statically defined data flows and provide the building blocks to fulfill complex business requirements.
 Dynamic updates of application logic allow Flink jobs to change at runtime, without downtime from stopping and resubmitting the code.
 Dynamic data partitioning provides the ability to change how events are distributed and grouped by Flink at runtime. Such functionality often becomes a natural requirement when building jobs with dynamically reconfigurable application logic.
@@ -3901,13 +3911,13 @@
 To remain focused on describing the core mechanics of the pattern, we kept the complexity of the DSL and the underlying rules engine to a minimum. Going forward, it is easy to imagine adding extensions such as allowing more sophisticated rule definitions, including filtering of certain events, logical rules chaining, and other more advanced functionality.
 In the second part of this series, we will describe how the rules make their way into the running Fraud Detection engine. Additionally, we will go over the implementation details of the main processing function of the pipeline - DynamicAlertFunction().
 Figure 4: End-to-end pipeline In the next article, we will see how Flink&rsquo;s broadcast streams can be utilized to help steer the processing within the Fraud Detection engine at runtime (Dynamic Application Updates pattern).
-`}),e.add({id:197,href:"/2019/12/11/apache-flink-1.8.3-released/",title:"Apache Flink 1.8.3 Released",section:"Flink Blog",content:`The Apache Flink community released the third bugfix version of the Apache Flink 1.8 series.
+`}),e.add({id:198,href:"/2019/12/11/apache-flink-1.8.3-released/",title:"Apache Flink 1.8.3 Released",section:"Flink Blog",content:`The Apache Flink community released the third bugfix version of the Apache Flink 1.8 series.
 This release includes 45 fixes and minor improvements for Flink 1.8.2. The list below includes a detailed list of all fixes and improvements.
 We highly recommend all users to upgrade to Flink 1.8.3.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.8.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.8.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.8.3&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-13723] - Use liquid-c for faster doc generation [FLINK-13724] - Remove unnecessary whitespace from the docs&#39; sidenav [FLINK-13725] - Use sassc for faster doc generation [FLINK-13726] - Build docs with jekyll 4.0.0.pre.beta1 [FLINK-13791] - Speed up sidenav by using group_by Bug [FLINK-12342] - Yarn Resource Manager Acquires Too Many Containers [FLINK-13184] - Starting a TaskExecutor blocks the YarnResourceManager&#39;s main thread [FLINK-13728] - Fix wrong closing tag order in sidenav [FLINK-13746] - Elasticsearch (v2.3.5) sink end-to-end test fails on Travis [FLINK-13749] - Make Flink client respect classloading policy [FLINK-13892] - HistoryServerTest failed on Travis [FLINK-13936] - NOTICE-binary is outdated [FLINK-13966] - Jar sorting in collect_license_files.sh is locale dependent [FLINK-13995] - Fix shading of the licence information of netty [FLINK-13999] - Correct the documentation of MATCH_RECOGNIZE [FLINK-14009] - Cron jobs broken due to verifying incorrect NOTICE-binary file [FLINK-14010] - Dispatcher &amp; JobManagers don&#39;t give up leadership when AM is shut down [FLINK-14043] - SavepointMigrationTestBase is super slow [FLINK-14107] - Kinesis consumer record emitter deadlock under event time alignment [FLINK-14175] - Upgrade KPL version in flink-connector-kinesis to fix application OOM [FLINK-14235] - Kafka010ProducerITCase&gt;KafkaProducerTestBase.testOneToOneAtLeastOnceCustomOperator fails on travis [FLINK-14315] - NPE with JobMaster.disconnectTaskManager [FLINK-14337] - HistoryServerTest.testHistoryServerIntegration failed on Travis [FLINK-14347] - YARNSessionFIFOITCase.checkForProhibitedLogContents found a log with prohibited string [FLINK-14370] - KafkaProducerAtLeastOnceITCase&gt;KafkaProducerTestBase.testOneToOneAtLeastOnceRegularSink fails on Travis [FLINK-14398] - Further split input unboxing code into separate methods [FLINK-14413] - shade-plugin ApacheNoticeResourceTransformer uses platform-dependent encoding 
[FLINK-14434] - Dispatcher#createJobManagerRunner should not start JobManagerRunner [FLINK-14562] - RMQSource leaves idle consumer after closing [FLINK-14589] - Redundant slot requests with the same AllocationID leads to inconsistent slot table [FLINK-15036] - Container startup error will be handled out side of the YarnResourceManager&#39;s main thread Improvement [FLINK-12848] - Method equals() in RowTypeInfo should consider fieldsNames [FLINK-13729] - Update website generation dependencies [FLINK-13965] - Keep hasDeprecatedKeys and deprecatedKeys methods in ConfigOption and mark it with @Deprecated annotation [FLINK-13967] - Generate full binary licensing via collect_license_files.sh [FLINK-13968] - Add travis check for the correctness of the binary licensing [FLINK-13991] - Add git exclusion for 1.9+ features to 1.8 [FLINK-14008] - Auto-generate binary licensing [FLINK-14104] - Bump Jackson to 2.10.1 [FLINK-14123] - Lower the default value of taskmanager.memory.fraction [FLINK-14215] - Add Docs for TM and JM Environment Variable Setting [FLINK-14334] - ElasticSearch docs refer to non-existent ExceptionUtils.containsThrowable [FLINK-14639] - Fix the document of Metrics that has an error for \`User Scope\` [FLINK-14646] - Check non-null for key in KeyGroupStreamPartitioner [FLINK-14995] - Kinesis NOTICE is incorrect `}),e.add({id:198,href:"/2019/11/25/how-to-query-pulsar-streams-using-apache-flink/",title:"How to query Pulsar Streams using Apache Flink",section:"Flink Blog",content:`In a previous story on the Flink blog, we explained the different ways that Apache Flink and Apache Pulsar can integrate to provide elastic data processing at large scale. This blog post discusses the new developments and integrations between the two frameworks and showcases how you can leverage Pulsar’s built-in schema to query Pulsar streams in real time using Apache Flink.
+Sub-task [FLINK-13723] - Use liquid-c for faster doc generation [FLINK-13724] - Remove unnecessary whitespace from the docs&#39; sidenav [FLINK-13725] - Use sassc for faster doc generation [FLINK-13726] - Build docs with jekyll 4.0.0.pre.beta1 [FLINK-13791] - Speed up sidenav by using group_by Bug [FLINK-12342] - Yarn Resource Manager Acquires Too Many Containers [FLINK-13184] - Starting a TaskExecutor blocks the YarnResourceManager&#39;s main thread [FLINK-13728] - Fix wrong closing tag order in sidenav [FLINK-13746] - Elasticsearch (v2.3.5) sink end-to-end test fails on Travis [FLINK-13749] - Make Flink client respect classloading policy [FLINK-13892] - HistoryServerTest failed on Travis [FLINK-13936] - NOTICE-binary is outdated [FLINK-13966] - Jar sorting in collect_license_files.sh is locale dependent [FLINK-13995] - Fix shading of the licence information of netty [FLINK-13999] - Correct the documentation of MATCH_RECOGNIZE [FLINK-14009] - Cron jobs broken due to verifying incorrect NOTICE-binary file [FLINK-14010] - Dispatcher &amp; JobManagers don&#39;t give up leadership when AM is shut down [FLINK-14043] - SavepointMigrationTestBase is super slow [FLINK-14107] - Kinesis consumer record emitter deadlock under event time alignment [FLINK-14175] - Upgrade KPL version in flink-connector-kinesis to fix application OOM [FLINK-14235] - Kafka010ProducerITCase&gt;KafkaProducerTestBase.testOneToOneAtLeastOnceCustomOperator fails on travis [FLINK-14315] - NPE with JobMaster.disconnectTaskManager [FLINK-14337] - HistoryServerTest.testHistoryServerIntegration failed on Travis [FLINK-14347] - YARNSessionFIFOITCase.checkForProhibitedLogContents found a log with prohibited string [FLINK-14370] - KafkaProducerAtLeastOnceITCase&gt;KafkaProducerTestBase.testOneToOneAtLeastOnceRegularSink fails on Travis [FLINK-14398] - Further split input unboxing code into separate methods [FLINK-14413] - shade-plugin ApacheNoticeResourceTransformer uses platform-dependent encoding 
[FLINK-14434] - Dispatcher#createJobManagerRunner should not start JobManagerRunner [FLINK-14562] - RMQSource leaves idle consumer after closing [FLINK-14589] - Redundant slot requests with the same AllocationID leads to inconsistent slot table [FLINK-15036] - Container startup error will be handled out side of the YarnResourceManager&#39;s main thread Improvement [FLINK-12848] - Method equals() in RowTypeInfo should consider fieldsNames [FLINK-13729] - Update website generation dependencies [FLINK-13965] - Keep hasDeprecatedKeys and deprecatedKeys methods in ConfigOption and mark it with @Deprecated annotation [FLINK-13967] - Generate full binary licensing via collect_license_files.sh [FLINK-13968] - Add travis check for the correctness of the binary licensing [FLINK-13991] - Add git exclusion for 1.9+ features to 1.8 [FLINK-14008] - Auto-generate binary licensing [FLINK-14104] - Bump Jackson to 2.10.1 [FLINK-14123] - Lower the default value of taskmanager.memory.fraction [FLINK-14215] - Add Docs for TM and JM Environment Variable Setting [FLINK-14334] - ElasticSearch docs refer to non-existent ExceptionUtils.containsThrowable [FLINK-14639] - Fix the document of Metrics that has an error for \`User Scope\` [FLINK-14646] - Check non-null for key in KeyGroupStreamPartitioner [FLINK-14995] - Kinesis NOTICE is incorrect `}),e.add({id:199,href:"/2019/11/25/how-to-query-pulsar-streams-using-apache-flink/",title:"How to query Pulsar Streams using Apache Flink",section:"Flink Blog",content:`In a previous story on the Flink blog, we explained the different ways that Apache Flink and Apache Pulsar can integrate to provide elastic data processing at large scale. This blog post discusses the new developments and integrations between the two frameworks and showcases how you can leverage Pulsar’s built-in schema to query Pulsar streams in real time using Apache Flink.
 A short intro to Apache Pulsar # Apache Pulsar is a flexible pub/sub messaging system, backed by durable log storage. Some of the framework’s highlights include multi-tenancy, a unified message model, structured event streams and a cloud-native architecture that make it a perfect fit for a wide set of use cases, ranging from billing, payments and trading services all the way to the unification of the different messaging architectures in an organization. If you are interested in finding out more about Pulsar, you can visit the Apache Pulsar documentation or get in touch with the Pulsar community on Slack.
 Existing Pulsar &amp; Flink integration (Apache Flink 1.6+) # The existing integration between Pulsar and Flink exploits Pulsar as a message queue in a Flink application. Flink developers can utilize Pulsar as a streaming source and streaming sink for their Flink applications by selecting a specific Pulsar source and connecting to their desired Pulsar cluster and topic:
 // create and configure Pulsar consumer PulsarSourceBuilder&lt;String&gt;builder = PulsarSourceBuilder .builder(new SimpleStringSchema()) .serviceUrl(serviceUrl) .topic(inputTopic) .subsciptionName(subscription); SourceFunction&lt;String&gt; src = builder.build(); // ingest DataStream with Pulsar consumer DataStream&lt;String&gt; words = env.addSource(src); Pulsar streams can then get connected to the Flink processing logic…
@@ -3925,7 +3935,7 @@
 Next Steps &amp; Future Integration # The goal of the integration between Pulsar and Flink is to simplify how developers use the two frameworks to build a unified data processing stack. As we progress from the classical Lamda architectures — where an online, speeding layer is combined with an offline, batch layer to run data computations — Flink and Pulsar present a great combination in providing a truly unified data processing stack. We see Flink as a unified computation engine, handling both online (streaming) and offline (batch) workloads and Pulsar as the unified data storage layer for a truly unified data processing stack that simplifies developer workloads.
 There is still a lot of ongoing work and effort from both communities in getting the integration even better, such as a new source API (FLIP-27) that will allow the contribution of the Pulsar connectors to the Flink community as well as a new subscription type called Key_Shared subscription type in Pulsar that will allow efficient scaling of the source parallelism. Additional efforts focus around the provision of end-to-end, exactly-once guarantees (currently available only in the source Pulsar connector, and not the sink Pulsar connector) and more efforts around using Pulsar/BookKeeper as a Flink state backend.
 You can find a more detailed overview of the integration work between the two communities in this recording video from Flink Forward Europe 2019 or sign up to the Flink dev mailing list for the latest contribution and integration efforts between Flink and Pulsar.
-`}),e.add({id:199,href:"/2019/11/06/running-apache-flink-on-kubernetes-with-kudo/",title:"Running Apache Flink on Kubernetes with KUDO",section:"Flink Blog",content:`A common use case for Apache Flink is streaming data analytics together with Apache Kafka, which provides a pub/sub model and durability for data streams. To achieve elastic scalability, both are typically deployed in clustered environments, and increasingly on top of container orchestration platforms like Kubernetes. The Operator pattern provides an extension mechanism to Kubernetes that captures human operator knowledge about an application, like Flink, in software to automate its operation. KUDO is an open source toolkit for building Operators using declarative YAML specs, with a focus on ease of use for cluster admins and developers.
+`}),e.add({id:200,href:"/2019/11/06/running-apache-flink-on-kubernetes-with-kudo/",title:"Running Apache Flink on Kubernetes with KUDO",section:"Flink Blog",content:`A common use case for Apache Flink is streaming data analytics together with Apache Kafka, which provides a pub/sub model and durability for data streams. To achieve elastic scalability, both are typically deployed in clustered environments, and increasingly on top of container orchestration platforms like Kubernetes. The Operator pattern provides an extension mechanism to Kubernetes that captures human operator knowledge about an application, like Flink, in software to automate its operation. KUDO is an open source toolkit for building Operators using declarative YAML specs, with a focus on ease of use for cluster admins and developers.
 In this blog post we demonstrate how to orchestrate a streaming data analytics application based on Flink and Kafka with KUDO. It consists of a Flink job that checks financial transactions for fraud, and two microservices that generate and display the transactions. You can find more details about this demo in the KUDO Operators repository, including instructions for installing the dependencies.
 Prerequisites # You can run this demo on your local machine using minikube. The instructions below were tested with minikube v1.5.1 and Kubernetes v1.16.2 but should work on any Kubernetes version above v1.15.0. First, start a minikube cluster with enough capacity:
 minikube start --cpus=6 --memory=9216 --disk-size=10g
@@ -3951,13 +3961,13 @@
 The job is up and running and we should now be able to see fraudulent transaction in the logs of the actor pod:
 $ kubectl logs $(kubectl get pod -l actor=flink-demo -o jsonpath=&#34;{.items[0].metadata.name}&#34;) Broker: flink-demo-kafka-kafka-0.flink-demo-kafka-svc:9093 Topic: fraud Detected Fraud: TransactionAggregate {startTimestamp=0, endTimestamp=1563395831000, totalAmount=19895: Transaction{timestamp=1563395778000, origin=1, target=&#39;3&#39;, amount=8341} Transaction{timestamp=1563395813000, origin=1, target=&#39;3&#39;, amount=8592} Transaction{timestamp=1563395817000, origin=1, target=&#39;3&#39;, amount=2802} Transaction{timestamp=1563395831000, origin=1, target=&#39;3&#39;, amount=160}} If you add the “-f” flag to the previous command, you can follow along as more transactions are streaming in and are evaluated by our Flink job.
 Conclusion # In this blog post we demonstrated how to easily deploy an end-to-end streaming data application on Kubernetes using KUDO. We deployed a Flink job and two microservices, as well as all the required infrastructure - Flink, Kafka, and ZooKeeper using just a few kubectl commands. To find out more about KUDO, visit the project website or join the community on Slack.
-`}),e.add({id:200,href:"/2019/10/18/apache-flink-1.9.1-released/",title:"Apache Flink 1.9.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix version of the Apache Flink 1.9 series.
+`}),e.add({id:201,href:"/2019/10/18/apache-flink-1.9.1-released/",title:"Apache Flink 1.9.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix version of the Apache Flink 1.9 series.
 This release includes 96 fixes and minor improvements for Flink 1.9.0. The list below includes a detailed list of all fixes and improvements.
 We highly recommend all users to upgrade to Flink 1.9.1.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.9.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.9.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.9.1&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Bug [FLINK-11630] - TaskExecutor does not wait for Task termination when terminating itself [FLINK-13490] - Fix if one column value is null when reading JDBC, the following values are all null [FLINK-13941] - Prevent data-loss by not cleaning up small part files from S3. [FLINK-12501] - AvroTypeSerializer does not work with types generated by avrohugger [FLINK-13386] - Fix some frictions in the new default Web UI [FLINK-13526] - Switching to a non existing catalog or database crashes sql-client [FLINK-13568] - DDL create table doesn&#39;t allow STRING data type [FLINK-13805] - Bad Error Message when TaskManager is lost [FLINK-13806] - Metric Fetcher floods the JM log with errors when TM is lost [FLINK-14010] - Dispatcher &amp; JobManagers don&#39;t give up leadership when AM is shut down [FLINK-14145] - CompletedCheckpointStore#getLatestCheckpoint(true) returns wrong checkpoint [FLINK-13059] - Cassandra Connector leaks Semaphore on Exception and hangs on close [FLINK-13534] - Unable to query Hive table with decimal column [FLINK-13562] - Throws exception when FlinkRelMdColumnInterval meets two stage stream group aggregate [FLINK-13563] - TumblingGroupWindow should implement toString method [FLINK-13564] - Throw exception if constant with YEAR TO MONTH resolution was used for group windows [FLINK-13588] - StreamTask.handleAsyncException throws away the exception cause [FLINK-13653] - ResultStore should avoid using RowTypeInfo when creating a result [FLINK-13711] - Hive array values not properly displayed in SQL CLI [FLINK-13737] - flink-dist should add provided dependency on flink-examples-table [FLINK-13738] - Fix NegativeArraySizeException in LongHybridHashTable [FLINK-13742] - Fix code generation when aggregation contains both distinct aggregate with and without filter [FLINK-13760] - Fix hardcode Scala version dependency in hive connector [FLINK-13761] - \`SplitStream\` should be deprecated because \`SplitJavaStream\` is deprecated [FLINK-13789] - Transactional 
Id Generation fails due to user code impacting formatting string [FLINK-13823] - Incorrect debug log in CompileUtils [FLINK-13825] - The original plugins dir is not restored after e2e test run [FLINK-13831] - Free Slots / All Slots display error [FLINK-13887] - Ensure defaultInputDependencyConstraint to be non-null when setting it in ExecutionConfig [FLINK-13897] - OSS FS NOTICE file is placed in wrong directory [FLINK-13933] - Hive Generic UDTF can not be used in table API both stream and batch mode [FLINK-13936] - NOTICE-binary is outdated [FLINK-13966] - Jar sorting in collect_license_files.sh is locale dependent [FLINK-14009] - Cron jobs broken due to verifying incorrect NOTICE-binary file [FLINK-14049] - Update error message for failed partition updates to include task name [FLINK-14076] - &#39;ClassNotFoundException: KafkaException&#39; on Flink v1.9 w/ checkpointing [FLINK-14107] - Kinesis consumer record emitter deadlock under event time alignment [FLINK-14119] - Clean idle state for RetractableTopNFunction [FLINK-14139] - Fix potential memory leak of rest server when using session/standalone cluster [FLINK-14140] - The Flink Logo Displayed in Flink Python Shell is Broken [FLINK-14150] - Unnecessary __pycache__ directories appears in pyflink.zip [FLINK-14288] - Add Py4j NOTICE for source release [FLINK-13892] - HistoryServerTest failed on Travis [FLINK-14043] - SavepointMigrationTestBase is super slow [FLINK-12164] - JobMasterTest.testJobFailureWhenTaskExecutorHeartbeatTimeout is unstable [FLINK-9900] - Fix unstable test ZooKeeperHighAvailabilityITCase#testRestoreBehaviourWithFaultyStateHandles [FLINK-13484] - ConnectedComponents end-to-end test instable with NoResourceAvailableException [FLINK-13489] - Heavy deployment end-to-end test fails on Travis with TM heartbeat timeout [FLINK-13514] - StreamTaskTest.testAsyncCheckpointingConcurrentCloseAfterAcknowledge unstable [FLINK-13530] - AbstractServerTest failed on Travis [FLINK-13585] - Fix sporadical 
deallock in TaskAsyncCallTest#testSetsUserCodeClassLoader() [FLINK-13599] - Kinesis end-to-end test failed on Travis [FLINK-13663] - SQL Client end-to-end test for modern Kafka failed on Travis [FLINK-13688] - HiveCatalogUseBlinkITCase.testBlinkUdf constantly failed [FLINK-13739] - BinaryRowTest.testWriteString() fails in some environments [FLINK-13746] - Elasticsearch (v2.3.5) sink end-to-end test fails on Travis [FLINK-13769] - BatchFineGrainedRecoveryITCase.testProgram failed on Travis [FLINK-13807] - Flink-avro unit tests fails if the character encoding in the environment is not default to UTF-8 Improvement [FLINK-13965] - Keep hasDeprecatedKeys and deprecatedKeys methods in ConfigOption and mark it with @Deprecated annotation [FLINK-9941] - Flush in ScalaCsvOutputFormat before close method [FLINK-13336] - Remove the legacy batch fault tolerance page and redirect it to the new task failure recovery page [FLINK-13380] - Improve the usability of Flink session cluster on Kubernetes [FLINK-13819] - Introduce RpcEndpoint State [FLINK-13845] - Drop all the content of removed &quot;Checkpointed&quot; interface [FLINK-13957] - Log dynamic properties on job submission [FLINK-13967] - Generate full binary licensing via collect_license_files.sh [FLINK-13968] - Add travis check for the correctness of the binary licensing [FLINK-13449] - Add ARM architecture to MemoryArchitecture Documentation [FLINK-13105] - Add documentation for blink planner&#39;s built-in functions [FLINK-13277] - add documentation of Hive source/sink [FLINK-13354] - Add documentation for how to use blink planner [FLINK-13355] - Add documentation for Temporal Table Join in blink planner [FLINK-13356] - Add documentation for TopN and Deduplication in blink planner [FLINK-13359] - Add documentation for DDL introduction [FLINK-13362] - Add documentation for Kafka &amp; ES &amp; FileSystem DDL [FLINK-13363] - Add documentation for streaming aggregate performance tunning. 
[FLINK-13706] - add documentation of how to use Hive functions in Flink [FLINK-13942] - Add Overview page for Getting Started section [FLINK-13863] - Update Operations Playground to Flink 1.9.0 [FLINK-13937] - Fix wrong hive dependency version in documentation [FLINK-13830] - The Document about Cluster on yarn have some problems [FLINK-14160] - Extend Operations Playground with --backpressure option [FLINK-13388] - Update UI screenshots in the documentation to the new default Web Frontend [FLINK-13415] - Document how to use hive connector in scala shell [FLINK-13517] - Restructure Hive Catalog documentation [FLINK-13643] - Document the workaround for users with a different minor Hive version [FLINK-13757] - Fix wrong description of "IS NOT TRUE" function documentation `}),e.add({id:201,href:"/2019/09/13/the-state-processor-api-how-to-read-write-and-modify-the-state-of-flink-applications/",title:"The State Processor API: How to Read, write and modify the state of Flink applications",section:"Flink Blog",content:`Whether you are running Apache FlinkⓇ in production or evaluated Flink as a computation framework in the past, you&rsquo;ve probably found yourself asking the question: How can I access, write or update state in a Flink savepoint? Ask no more! Apache Flink 1.9.0 introduces the State Processor API, a powerful extension of the DataSet API that allows reading, writing and modifying state in Flink&rsquo;s savepoints and checkpoints.
+Bug [FLINK-11630] - TaskExecutor does not wait for Task termination when terminating itself [FLINK-13490] - Fix if one column value is null when reading JDBC, the following values are all null [FLINK-13941] - Prevent data-loss by not cleaning up small part files from S3. [FLINK-12501] - AvroTypeSerializer does not work with types generated by avrohugger [FLINK-13386] - Fix some frictions in the new default Web UI [FLINK-13526] - Switching to a non existing catalog or database crashes sql-client [FLINK-13568] - DDL create table doesn&#39;t allow STRING data type [FLINK-13805] - Bad Error Message when TaskManager is lost [FLINK-13806] - Metric Fetcher floods the JM log with errors when TM is lost [FLINK-14010] - Dispatcher &amp; JobManagers don&#39;t give up leadership when AM is shut down [FLINK-14145] - CompletedCheckpointStore#getLatestCheckpoint(true) returns wrong checkpoint [FLINK-13059] - Cassandra Connector leaks Semaphore on Exception and hangs on close [FLINK-13534] - Unable to query Hive table with decimal column [FLINK-13562] - Throws exception when FlinkRelMdColumnInterval meets two stage stream group aggregate [FLINK-13563] - TumblingGroupWindow should implement toString method [FLINK-13564] - Throw exception if constant with YEAR TO MONTH resolution was used for group windows [FLINK-13588] - StreamTask.handleAsyncException throws away the exception cause [FLINK-13653] - ResultStore should avoid using RowTypeInfo when creating a result [FLINK-13711] - Hive array values not properly displayed in SQL CLI [FLINK-13737] - flink-dist should add provided dependency on flink-examples-table [FLINK-13738] - Fix NegativeArraySizeException in LongHybridHashTable [FLINK-13742] - Fix code generation when aggregation contains both distinct aggregate with and without filter [FLINK-13760] - Fix hardcode Scala version dependency in hive connector [FLINK-13761] - \`SplitStream\` should be deprecated because \`SplitJavaStream\` is deprecated [FLINK-13789] - Transactional 
Id Generation fails due to user code impacting formatting string [FLINK-13823] - Incorrect debug log in CompileUtils [FLINK-13825] - The original plugins dir is not restored after e2e test run [FLINK-13831] - Free Slots / All Slots display error [FLINK-13887] - Ensure defaultInputDependencyConstraint to be non-null when setting it in ExecutionConfig [FLINK-13897] - OSS FS NOTICE file is placed in wrong directory [FLINK-13933] - Hive Generic UDTF can not be used in table API both stream and batch mode [FLINK-13936] - NOTICE-binary is outdated [FLINK-13966] - Jar sorting in collect_license_files.sh is locale dependent [FLINK-14009] - Cron jobs broken due to verifying incorrect NOTICE-binary file [FLINK-14049] - Update error message for failed partition updates to include task name [FLINK-14076] - &#39;ClassNotFoundException: KafkaException&#39; on Flink v1.9 w/ checkpointing [FLINK-14107] - Kinesis consumer record emitter deadlock under event time alignment [FLINK-14119] - Clean idle state for RetractableTopNFunction [FLINK-14139] - Fix potential memory leak of rest server when using session/standalone cluster [FLINK-14140] - The Flink Logo Displayed in Flink Python Shell is Broken [FLINK-14150] - Unnecessary __pycache__ directories appears in pyflink.zip [FLINK-14288] - Add Py4j NOTICE for source release [FLINK-13892] - HistoryServerTest failed on Travis [FLINK-14043] - SavepointMigrationTestBase is super slow [FLINK-12164] - JobMasterTest.testJobFailureWhenTaskExecutorHeartbeatTimeout is unstable [FLINK-9900] - Fix unstable test ZooKeeperHighAvailabilityITCase#testRestoreBehaviourWithFaultyStateHandles [FLINK-13484] - ConnectedComponents end-to-end test instable with NoResourceAvailableException [FLINK-13489] - Heavy deployment end-to-end test fails on Travis with TM heartbeat timeout [FLINK-13514] - StreamTaskTest.testAsyncCheckpointingConcurrentCloseAfterAcknowledge unstable [FLINK-13530] - AbstractServerTest failed on Travis [FLINK-13585] - Fix sporadical 
deallock in TaskAsyncCallTest#testSetsUserCodeClassLoader() [FLINK-13599] - Kinesis end-to-end test failed on Travis [FLINK-13663] - SQL Client end-to-end test for modern Kafka failed on Travis [FLINK-13688] - HiveCatalogUseBlinkITCase.testBlinkUdf constantly failed [FLINK-13739] - BinaryRowTest.testWriteString() fails in some environments [FLINK-13746] - Elasticsearch (v2.3.5) sink end-to-end test fails on Travis [FLINK-13769] - BatchFineGrainedRecoveryITCase.testProgram failed on Travis [FLINK-13807] - Flink-avro unit tests fails if the character encoding in the environment is not default to UTF-8 Improvement [FLINK-13965] - Keep hasDeprecatedKeys and deprecatedKeys methods in ConfigOption and mark it with @Deprecated annotation [FLINK-9941] - Flush in ScalaCsvOutputFormat before close method [FLINK-13336] - Remove the legacy batch fault tolerance page and redirect it to the new task failure recovery page [FLINK-13380] - Improve the usability of Flink session cluster on Kubernetes [FLINK-13819] - Introduce RpcEndpoint State [FLINK-13845] - Drop all the content of removed &quot;Checkpointed&quot; interface [FLINK-13957] - Log dynamic properties on job submission [FLINK-13967] - Generate full binary licensing via collect_license_files.sh [FLINK-13968] - Add travis check for the correctness of the binary licensing [FLINK-13449] - Add ARM architecture to MemoryArchitecture Documentation [FLINK-13105] - Add documentation for blink planner&#39;s built-in functions [FLINK-13277] - add documentation of Hive source/sink [FLINK-13354] - Add documentation for how to use blink planner [FLINK-13355] - Add documentation for Temporal Table Join in blink planner [FLINK-13356] - Add documentation for TopN and Deduplication in blink planner [FLINK-13359] - Add documentation for DDL introduction [FLINK-13362] - Add documentation for Kafka &amp; ES &amp; FileSystem DDL [FLINK-13363] - Add documentation for streaming aggregate performance tunning. 
[FLINK-13706] - add documentation of how to use Hive functions in Flink [FLINK-13942] - Add Overview page for Getting Started section [FLINK-13863] - Update Operations Playground to Flink 1.9.0 [FLINK-13937] - Fix wrong hive dependency version in documentation [FLINK-13830] - The Document about Cluster on yarn have some problems [FLINK-14160] - Extend Operations Playground with --backpressure option [FLINK-13388] - Update UI screenshots in the documentation to the new default Web Frontend [FLINK-13415] - Document how to use hive connector in scala shell [FLINK-13517] - Restructure Hive Catalog documentation [FLINK-13643] - Document the workaround for users with a different minor Hive version [FLINK-13757] - Fix wrong description of "IS NOT TRUE" function documentation `}),e.add({id:202,href:"/2019/09/13/the-state-processor-api-how-to-read-write-and-modify-the-state-of-flink-applications/",title:"The State Processor API: How to Read, write and modify the state of Flink applications",section:"Flink Blog",content:`Whether you are running Apache FlinkⓇ in production or evaluated Flink as a computation framework in the past, you&rsquo;ve probably found yourself asking the question: How can I access, write or update state in a Flink savepoint? Ask no more! Apache Flink 1.9.0 introduces the State Processor API, a powerful extension of the DataSet API that allows reading, writing and modifying state in Flink&rsquo;s savepoints and checkpoints.
 In this post, we explain why this feature is a big step for Flink, what you can use it for, and how to use it. Finally, we will discuss the future of the State Processor API and how it aligns with our plans to evolve Flink into a system for unified batch and stream processing.
 Stateful Stream Processing with Apache Flink until Flink 1.9 # All non-trivial stream processing applications are stateful and most of them are designed to run for months or years. Over time, many of them accumulate a lot of valuable state that can be very expensive or even impossible to rebuild if it gets lost due to a failure. In order to guarantee the consistency and durability of application state, Flink featured a sophisticated checkpointing and recovery mechanism from very early on. With every release, the Flink community has added more and more state-related features to improve checkpointing and recovery speed, the maintenance of applications, and practices to manage applications.
 However, a feature that was commonly requested by Flink users was the ability to access the state of an application “from the outside”. This request was motivated by the need to validate or debug the state of an application, to migrate the state of an application to another application, to evolve an application from the Heap State Backend to the RocksDB State Backend, or to import the initial state of an application from an external system like a relational database.
@@ -3972,13 +3982,13 @@
 The State Processor API now offers methods to create, load, and write a savepoint. You can read a DataSet from a loaded savepoint or convert a DataSet into a state and add it to a savepoint. DataSets can be processed with the full feature set of the DataSet API. With these building blocks, all of the before-mentioned use cases (and more) can be addressed. Please have a look at the documentation if you&rsquo;d like to learn how to use the State Processor API in detail.
 Why DataSet API? # In case you are familiar with Flink&rsquo;s roadmap, you might be surprised that the State Processor API is based on the DataSet API. The Flink community plans to extend the DataStream API with the concept of BoundedStreams and deprecate the DataSet API. When designing this feature, we also evaluated the DataStream API or Table API but neither could provide the right feature set yet. Since we didn&rsquo;t want to block this feature on the progress of Flink&rsquo;s APIs, we decided to build it on the DataSet API, but kept its dependencies on the DataSet API to a minimum. Hence, migrating it to another API should be fairly easy.
 Summary # Flink users have requested a feature to access and modify the state of streaming applications from the outside for a long time. With the State Processor API, Flink 1.9.0 finally exposes application state as a data format that can be manipulated. This feature opens up many new possibilities for how users can maintain and manage Flink streaming applications, including arbitrary evolution of stream applications and exporting and bootstrapping of application state. To put it concisely, the State Processor API unlocks the black box that savepoints used to be.
-`}),e.add({id:202,href:"/2019/09/11/apache-flink-1.8.2-released/",title:"Apache Flink 1.8.2 Released",section:"Flink Blog",content:`The Apache Flink community released the second bugfix version of the Apache Flink 1.8 series.
+`}),e.add({id:203,href:"/2019/09/11/apache-flink-1.8.2-released/",title:"Apache Flink 1.8.2 Released",section:"Flink Blog",content:`The Apache Flink community released the second bugfix version of the Apache Flink 1.8 series.
 This release includes 23 fixes and minor improvements for Flink 1.8.1. The list below includes a detailed list of all fixes and improvements.
 We highly recommend all users to upgrade to Flink 1.8.2.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.8.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.8.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.8.2&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Bug [FLINK-13941] - Prevent data-loss by not cleaning up small part files from S3. [FLINK-9526] - BucketingSink end-to-end test failed on Travis [FLINK-10368] - &#39;Kerberized YARN on Docker test&#39; unstable [FLINK-12319] - StackOverFlowError in cep.nfa.sharedbuffer.SharedBuffer [FLINK-12736] - ResourceManager may release TM with allocated slots [FLINK-12889] - Job keeps in FAILING state [FLINK-13059] - Cassandra Connector leaks Semaphore on Exception; hangs on close [FLINK-13159] - java.lang.ClassNotFoundException when restore job [FLINK-13367] - Make ClosureCleaner detect writeReplace serialization override [FLINK-13369] - Recursive closure cleaner ends up with stackOverflow in case of circular dependency [FLINK-13394] - Use fallback unsafe secure MapR in nightly.sh [FLINK-13484] - ConnectedComponents end-to-end test instable with NoResourceAvailableException [FLINK-13499] - Remove dependency on MapR artifact repository [FLINK-13508] - CommonTestUtils#waitUntilCondition() may attempt to sleep with negative time [FLINK-13586] - Method ClosureCleaner.clean broke backward compatibility between 1.8.0 and 1.8.1 [FLINK-13761] - \`SplitStream\` should be deprecated because \`SplitJavaStream\` is deprecated [FLINK-13789] - Transactional Id Generation fails due to user code impacting formatting string [FLINK-13806] - Metric Fetcher floods the JM log with errors when TM is lost [FLINK-13807] - Flink-avro unit tests fails if the character encoding in the environment is not default to UTF-8 [FLINK-13897] - OSS FS NOTICE file is placed in wrong directory Improvement [FLINK-12578] - Use secure URLs for Maven repositories [FLINK-12741] - Update docs about Kafka producer fault tolerance guarantees [FLINK-12749] - Add Flink Operations Playground documentation `}),e.add({id:203,href:"/2019/09/05/flink-community-update-september19/",title:"Flink Community Update - September'19",section:"Flink Blog",content:`This has been an exciting, fast-paced year for the Apache Flink 
community. But with over 10k messages across the mailing lists, 3k Jira tickets and 2k pull requests, it is not easy to keep up with the latest state of the project. Plus everything happening around it. With that in mind, we want to bring back regular community updates to the Flink blog.
+Bug [FLINK-13941] - Prevent data-loss by not cleaning up small part files from S3. [FLINK-9526] - BucketingSink end-to-end test failed on Travis [FLINK-10368] - &#39;Kerberized YARN on Docker test&#39; unstable [FLINK-12319] - StackOverFlowError in cep.nfa.sharedbuffer.SharedBuffer [FLINK-12736] - ResourceManager may release TM with allocated slots [FLINK-12889] - Job keeps in FAILING state [FLINK-13059] - Cassandra Connector leaks Semaphore on Exception; hangs on close [FLINK-13159] - java.lang.ClassNotFoundException when restore job [FLINK-13367] - Make ClosureCleaner detect writeReplace serialization override [FLINK-13369] - Recursive closure cleaner ends up with stackOverflow in case of circular dependency [FLINK-13394] - Use fallback unsafe secure MapR in nightly.sh [FLINK-13484] - ConnectedComponents end-to-end test instable with NoResourceAvailableException [FLINK-13499] - Remove dependency on MapR artifact repository [FLINK-13508] - CommonTestUtils#waitUntilCondition() may attempt to sleep with negative time [FLINK-13586] - Method ClosureCleaner.clean broke backward compatibility between 1.8.0 and 1.8.1 [FLINK-13761] - \`SplitStream\` should be deprecated because \`SplitJavaStream\` is deprecated [FLINK-13789] - Transactional Id Generation fails due to user code impacting formatting string [FLINK-13806] - Metric Fetcher floods the JM log with errors when TM is lost [FLINK-13807] - Flink-avro unit tests fails if the character encoding in the environment is not default to UTF-8 [FLINK-13897] - OSS FS NOTICE file is placed in wrong directory Improvement [FLINK-12578] - Use secure URLs for Maven repositories [FLINK-12741] - Update docs about Kafka producer fault tolerance guarantees [FLINK-12749] - Add Flink Operations Playground documentation `}),e.add({id:204,href:"/2019/09/05/flink-community-update-september19/",title:"Flink Community Update - September'19",section:"Flink Blog",content:`This has been an exciting, fast-paced year for the Apache Flink 
community. But with over 10k messages across the mailing lists, 3k Jira tickets and 2k pull requests, it is not easy to keep up with the latest state of the project. Plus everything happening around it. With that in mind, we want to bring back regular community updates to the Flink blog.
 The first post in the series takes you on an little detour across the year, to freshen up and make sure you&rsquo;re all up to date.
 The Year (so far) in Flink # Two major versions were released this year: Flink 1.8 and Flink 1.9; paving the way for the goal of making Flink the first framework to seamlessly support stream and batch processing with a single, unified runtime. The contribution of Blink to Apache Flink was key in accelerating the path to this vision and reduced the waiting time for long-pending user requests — such as Hive integration, (better) Python support, the rework of Flink&rsquo;s Machine Learning library and&hellip;fine-grained failure recovery (FLIP-1).
 The 1.9 release was the result of the biggest community effort the project has experienced so far, with the number of contributors soaring to 190 (see The Bigger Picture). For a quick overview of the upcoming work for Flink 1.10 (and beyond), have a look at the updated roadmap!
@@ -4000,7 +4010,7 @@
 North America # [Conference] Strata Data Conference 2019, September 23-26, New York, USA [Meetup] Apache Flink Bay Area Meetup, September 24, San Francisco, USA [Conference] Scale By The Bay 2019, November 13-15, San Francisco, USA Europe # [Meetup] Apache Flink London Meetup, September 23, London, UK
 [Conference] Flink Forward Europe 2019, October 7-9, Berlin, Germany
 * The next edition of Flink Forward Europe is around the corner and the [program](https://europe-2019.flink-forward.org/conference-program) has been announced, featuring 70+ talks as well as panel discussions and interactive "Ask Me Anything" sessions with core Flink committers. If you're looking to learn more about Flink and share your experience with other community members, there really is [no better place]((https://vimeo.com/296403091)) than Flink Forward! Note: if you are a committer for any Apache project, you can get a free ticket by registering with your Apache email address and using the discount code: FFEU19-ApacheCommitter. * [Conference] **[ApacheCon Berlin 2019](https://aceu19.apachecon.com/)**, October 22-24, Berlin, Germany * [Conference] **[Data2Day 2019](https://www.data2day.de/)**, October 22-24, Ludwigshafen, Germany * [Conference] **[Big Data Tech Warsaw 2020](https://bigdatatechwarsaw.eu)**, February 7, Warsaw, Poland * The Call For Presentations (CFP) is now [open](https://bigdatatechwarsaw.eu/cfp/). Asia # [Conference] Flink Forward Asia 2019, November 28-30, Beijing, China * The second edition of Flink Forward Asia is also happening later this year, in Beijing, and the CFP is [open](https://developer.aliyun.com/special/ffa2019) until September 20. If you&rsquo;d like to keep a closer eye on what’s happening in the community, subscribe to the community mailing list to get fine-grained weekly updates, upcoming event announcements and more. Also, please reach out if you&rsquo;re interested in organizing or being part of Flink events in your area!
-`}),e.add({id:204,href:"/2019/08/22/apache-flink-1.9.0-release-announcement/",title:"Apache Flink 1.9.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is proud to announce the release of Apache Flink 1.9.0.
+`}),e.add({id:205,href:"/2019/08/22/apache-flink-1.9.0-release-announcement/",title:"Apache Flink 1.9.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is proud to announce the release of Apache Flink 1.9.0.
 The Apache Flink project&rsquo;s goal is to develop a stream processing system to unify and power many forms of real-time and offline data processing applications as well as event-driven applications. In this release, we have made a huge step forward in that effort, by integrating Flink’s stream and batch processing capabilities under a single, unified runtime.
 Significant features on this path are batch-style recovery for batch jobs and a preview of the new Blink-based query engine for Table API and SQL queries. We are also excited to announce the availability of the State Processor API, which is one of the most frequently requested features and enables users to read and write savepoints with Flink DataSet jobs. Finally, Flink 1.9 includes a reworked WebUI and previews of Flink’s new Python Table API and its integration with the Apache Hive ecosystem.
 This blog post describes all major new features and improvements, important changes to be aware of and what to expect moving forward. For more details, check the complete release changelog.
@@ -4041,7 +4051,7 @@
 Important Changes # The Table API and SQL are now part of the default configuration of the Flink distribution. Before, the Table API and SQL had to be enabled by moving the corresponding JAR file from ./opt to ./lib. The machine learning library (flink-ml) has been removed in preparation for FLIP-39. The old DataSet and DataStream Python APIs have been removed in favor of FLIP-38. Flink can be compiled and run on Java 9. Note that certain components interacting with external systems (connectors, filesystems, reporters) may not work since the respective projects may have skipped Java 9 support. Release Notes # Please review the release notes for a more detailed list of changes and new features if you plan to upgrade your Flink setup to Flink 1.9.0.
 List of Contributors # We would like to thank all contributors who have made this release possible:
 Abdul Qadeer (abqadeer), Aitozi, Alberto Romero, Aleksey Pak, Alexander Fedulov, Alice Yan, Aljoscha Krettek, Aloys, Andrew Duffy, Andrey Zagrebin, Ankur, Artsem Semianenka, Benchao Li, Biao Liu, Bo WANG, Bowen L, Chesnay Schepler, Clark Yang, Congxian Qiu, Cristian, Danny Chan, David Moravek, Dawid Wysakowicz, Dian Fu, EronWright, Fabian Hueske, Fabio Lombardelli, Fokko Driesprong, Gao Yun, Gary Yao, Gen Luo, Gyula Fora, Hequn Cheng, Hongtao Zhang, Huang Xingbo, HuangXingBo, Hugo Da Cruz Louro, Humberto Rodríguez A, Hwanju Kim, Igal Shilman, Jamie Grier, Jark Wu, Jason, Jasper Yue, Jeff Zhang, Jiangjie (Becket) Qin, Jiezhi.G, Jincheng Sun, Jing Zhang, Jingsong Lee, Juan Gentile, Jungtaek Lim, Kailash Dayanand, Kevin Bohinski, Konstantin Knauf, Konstantinos Papadopoulos, Kostas Kloudas, Kurt Young, Lakshmi, Lakshmi Gururaja Rao, Leeviiii, LouisXu, Maximilian Michels, Nico Kruber, Niels Basjes, Paul Lam, PengFei Li, Peter Huang, Pierre Zemb, Piotr Nowojski, Piyush Narang, Richard Deurwaarder, Robert Metzger, Robert Stoll, Romano Vacca, Rong Rong, Rui Li, Ryantaocer, Scott Mitchell, Seth Wiesman, Shannon Carey, Shimin Yang, Stefan Richter, Stephan Ewen, Stephen Connolly, Steven Wu, SuXingLee, TANG Wen-hui, Thomas Weise, Till Rohrmann, Timo Walther, Tom Goong, TsReaper, Tzu-Li (Gordon) Tai, Ufuk Celebi, Victor Wong, WangHengwei, Wei Zhong, WeiZhong94, Xintong Song, Xpray, XuQianJin-Stars, Xuefu Zhang, Xupingyong, Yangze Guo, Yu Li, Yun Gao, Yun Tang, Zhanchun Zhang, Zhenghua Gao, Zhijiang, Zhu Zhu, Zili Chen, aloys, arganzheng, azagrebin, bd2019us, beyond1920, biao.liub, blueszheng, boshu Zheng, chenqi, chummyhe89, chunpinghe, dcadmin, dianfu, godfrey he, guanghui01.rong, hehuiyuan, hello, hequn8128, jackyyin, joongkeun.yang, klion26, lamber-ken, leesf, liguowei, lincoln-lil, liyafan82, luoqi, mans2singh, maqingxiang, maxin, mjl, okidogi, ozan, potseluev, qiangsi.lq, qiaoran, robbinli, shaoxuan-wang, shengqian.zhou, shenlang.sl, shuai-xu, sunhaibotb, tianchen, 
tianchen92, tison, tom_gong, vinoyang, vthinkxie, wanggeng3, wenhuitang, winifredtamg, xl38154, xuyang1706, yangfei5, yanghua, yuzhao.cyz, zhangxin516, zhangxinxing, zhaofaxian, zhijiang, zjuwangg, 林小铂, 黄培松, 时无两丶.
-`}),e.add({id:205,href:"/2019/07/23/flink-network-stack-vol.-2-monitoring-metrics-and-that-backpressure-thing/",title:"Flink Network Stack Vol. 2: Monitoring, Metrics, and that Backpressure Thing",section:"Flink Blog",content:` In a previous blog post, we presented how Flink’s network stack works from the high-level abstractions to the low-level details. This second blog post in the series of network stack posts extends on this knowledge and discusses monitoring network-related metrics to identify effects such as backpressure or bottlenecks in throughput and latency. Although this post briefly covers what to do with backpressure, the topic of tuning the network stack will be further examined in a future post. If you are unfamiliar with the network stack we highly recommend reading the network stack deep-dive first and then continuing here.
+`}),e.add({id:206,href:"/2019/07/23/flink-network-stack-vol.-2-monitoring-metrics-and-that-backpressure-thing/",title:"Flink Network Stack Vol. 2: Monitoring, Metrics, and that Backpressure Thing",section:"Flink Blog",content:` In a previous blog post, we presented how Flink’s network stack works from the high-level abstractions to the low-level details. This second blog post in the series of network stack posts extends on this knowledge and discusses monitoring network-related metrics to identify effects such as backpressure or bottlenecks in throughput and latency. Although this post briefly covers what to do with backpressure, the topic of tuning the network stack will be further examined in a future post. If you are unfamiliar with the network stack we highly recommend reading the network stack deep-dive first and then continuing here.
 Monitoring # Probably the most important part of network monitoring is monitoring backpressure, a situation where a system is receiving data at a higher rate than it can process¹. Such behaviour will result in the sender being backpressured and may be caused by two things:
 The receiver is slow.
 This can happen because the receiver is backpressured itself, is unable to keep processing at the same rate as the sender, or is temporarily blocked by garbage collection, lack of system resources, or I/O.
@@ -4098,13 +4108,13 @@
 By looking at the exposed latency tracking metrics for each subtask, for example at the 95th percentile, you should nevertheless be able to identify subtasks which are adding substantially to the overall source-to-sink latency and continue with optimising there.
 Note Flink's latency markers assume that the clocks on all machines in the cluster are in sync. We recommend setting up an automated clock synchronisation service (like NTP) to avoid false latency results. Warning Enabling latency metrics can significantly impact the performance of the cluster (in particular for \`subtask\` granularity) due to the sheer amount of metrics being added as well as the use of histograms which are quite expensive to maintain. It is highly recommended to only use them for debugging purposes. Conclusion # In the previous sections we discussed how to monitor Flink&rsquo;s network stack which primarily involves identifying backpressure: where it occurs, where it originates from, and (potentially) why it occurs. This can be executed in two ways: for simple cases and debugging sessions by using the backpressure monitor; for continuous monitoring, more in-depth analysis, and less runtime overhead by using Flink’s task and network stack metrics. Backpressure can be caused by the network layer itself but, in most cases, is caused by some subtask under high load. These two scenarios can be distinguished from one another by analysing the metrics as described above. We also provided some hints at monitoring resource usage and tracking network latencies that may add up from sources to sinks.
 Stay tuned for the third blog post in the series of network stack posts that will focus on tuning techniques and anti-patterns to avoid.
-`}),e.add({id:206,href:"/2019/07/02/apache-flink-1.8.1-released/",title:"Apache Flink 1.8.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix version of the Apache Flink 1.8 series.
+`}),e.add({id:207,href:"/2019/07/02/apache-flink-1.8.1-released/",title:"Apache Flink 1.8.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix version of the Apache Flink 1.8 series.
 This release includes more than 40 fixes and minor improvements for Flink 1.8.1. The list below includes a detailed list of all improvements, sub-tasks and bug fixes.
 We highly recommend all users to upgrade to Flink 1.8.1.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.8.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.8.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.8.1&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-10921] - Prioritize shard consumers in Kinesis Consumer by event time [FLINK-12617] - StandaloneJobClusterEntrypoint should default to random JobID for non-HA setups Bug [FLINK-9445] - scala-shell uses plain java command [FLINK-10455] - Potential Kafka producer leak in case of failures [FLINK-10941] - Slots prematurely released which still contain unconsumed data [FLINK-11059] - JobMaster may continue using an invalid slot if releasing idle slot meet a timeout [FLINK-11107] - Avoid memory stateBackend to create arbitrary folders under HA path when no checkpoint path configured [FLINK-11897] - ExecutionGraphSuspendTest does not wait for all tasks to be submitted [FLINK-11915] - DataInputViewStream skip returns wrong value [FLINK-11987] - Kafka producer occasionally throws NullpointerException [FLINK-12009] - Wrong check message about heartbeat interval for HeartbeatServices [FLINK-12042] - RocksDBStateBackend mistakenly uses default filesystem [FLINK-12112] - AbstractTaskManagerProcessFailureRecoveryTest process output logging does not work properly [FLINK-12132] - The example in /docs/ops/deployment/yarn_setup.md should be updated due to the change FLINK-2021 [FLINK-12184] - HistoryServerArchiveFetcher isn&#39;t compatible with old version [FLINK-12219] - Yarn application can&#39;t stop when flink job failed in per-job yarn cluster mode [FLINK-12247] - fix NPE when writing an archive file to a FileSystem [FLINK-12260] - Slot allocation failure by taskmanager registration timeout and race [FLINK-12296] - Data loss silently in RocksDBStateBackend when more than one operator(has states) chained in a single task [FLINK-12297] - Make ClosureCleaner recursive [FLINK-12301] - Scala value classes inside case classes cannot be serialized anymore in Flink 1.8.0 [FLINK-12342] - Yarn Resource Manager Acquires Too Many Containers [FLINK-12375] - flink-container job jar does not have read permissions [FLINK-12416] - Docker build script fails on symlink creation 
ln -s [FLINK-12544] - Deadlock while releasing memory and requesting segment concurrent in SpillableSubpartition [FLINK-12547] - Deadlock when the task thread downloads jars using BlobClient [FLINK-12646] - Use reserved IP as unrouteable IP in RestClientTest [FLINK-12688] - Make serializer lazy initialization thread safe in StateDescriptor [FLINK-12740] - SpillableSubpartitionTest deadlocks on Travis [FLINK-12835] - Time conversion is wrong in ManualClock [FLINK-12863] - Race condition between slot offerings and AllocatedSlotReport [FLINK-12865] - State inconsistency between RM and TM on the slot status [FLINK-12871] - Wrong SSL setup examples in docs [FLINK-12895] - TaskManagerProcessFailureBatchRecoveryITCase.testTaskManagerProcessFailure failed on travis [FLINK-12896] - TaskCheckpointStatisticDetailsHandler uses wrong value for JobID when archiving Improvement [FLINK-11126] - Filter out AMRMToken in the TaskManager credentials [FLINK-12137] - Add more proper explanation on flink streaming connectors [FLINK-12169] - Improve Javadoc of MessageAcknowledgingSourceBase [FLINK-12378] - Consolidate FileSystem Documentation [FLINK-12391] - Add timeout to transfer.sh [FLINK-12539] - StreamingFileSink: Make the class extendable to customize for different usecases Test [FLINK-12350] - RocksDBStateBackendTest doesn&#39;t cover the incremental checkpoint code path Task [FLINK-12460] - Change taskmanager.tmp.dirs to io.tmp.dirs in configuration docs `}),e.add({id:207,href:"/2019/06/26/a-practical-guide-to-broadcast-state-in-apache-flink/",title:"A Practical Guide to Broadcast State in Apache Flink",section:"Flink Blog",content:`Since version 1.5.0, Apache Flink features a new type of state which is called Broadcast State. In this post, we explain what Broadcast State is, and show an example of how it can be applied to an application that evaluates dynamic patterns on an event stream. 
We walk you through the processing steps and the source code to implement this application in practice.
+Sub-task [FLINK-10921] - Prioritize shard consumers in Kinesis Consumer by event time [FLINK-12617] - StandaloneJobClusterEntrypoint should default to random JobID for non-HA setups Bug [FLINK-9445] - scala-shell uses plain java command [FLINK-10455] - Potential Kafka producer leak in case of failures [FLINK-10941] - Slots prematurely released which still contain unconsumed data [FLINK-11059] - JobMaster may continue using an invalid slot if releasing idle slot meet a timeout [FLINK-11107] - Avoid memory stateBackend to create arbitrary folders under HA path when no checkpoint path configured [FLINK-11897] - ExecutionGraphSuspendTest does not wait for all tasks to be submitted [FLINK-11915] - DataInputViewStream skip returns wrong value [FLINK-11987] - Kafka producer occasionally throws NullpointerException [FLINK-12009] - Wrong check message about heartbeat interval for HeartbeatServices [FLINK-12042] - RocksDBStateBackend mistakenly uses default filesystem [FLINK-12112] - AbstractTaskManagerProcessFailureRecoveryTest process output logging does not work properly [FLINK-12132] - The example in /docs/ops/deployment/yarn_setup.md should be updated due to the change FLINK-2021 [FLINK-12184] - HistoryServerArchiveFetcher isn&#39;t compatible with old version [FLINK-12219] - Yarn application can&#39;t stop when flink job failed in per-job yarn cluster mode [FLINK-12247] - fix NPE when writing an archive file to a FileSystem [FLINK-12260] - Slot allocation failure by taskmanager registration timeout and race [FLINK-12296] - Data loss silently in RocksDBStateBackend when more than one operator(has states) chained in a single task [FLINK-12297] - Make ClosureCleaner recursive [FLINK-12301] - Scala value classes inside case classes cannot be serialized anymore in Flink 1.8.0 [FLINK-12342] - Yarn Resource Manager Acquires Too Many Containers [FLINK-12375] - flink-container job jar does not have read permissions [FLINK-12416] - Docker build script fails on symlink creation 
ln -s [FLINK-12544] - Deadlock while releasing memory and requesting segment concurrent in SpillableSubpartition [FLINK-12547] - Deadlock when the task thread downloads jars using BlobClient [FLINK-12646] - Use reserved IP as unrouteable IP in RestClientTest [FLINK-12688] - Make serializer lazy initialization thread safe in StateDescriptor [FLINK-12740] - SpillableSubpartitionTest deadlocks on Travis [FLINK-12835] - Time conversion is wrong in ManualClock [FLINK-12863] - Race condition between slot offerings and AllocatedSlotReport [FLINK-12865] - State inconsistency between RM and TM on the slot status [FLINK-12871] - Wrong SSL setup examples in docs [FLINK-12895] - TaskManagerProcessFailureBatchRecoveryITCase.testTaskManagerProcessFailure failed on travis [FLINK-12896] - TaskCheckpointStatisticDetailsHandler uses wrong value for JobID when archiving Improvement [FLINK-11126] - Filter out AMRMToken in the TaskManager credentials [FLINK-12137] - Add more proper explanation on flink streaming connectors [FLINK-12169] - Improve Javadoc of MessageAcknowledgingSourceBase [FLINK-12378] - Consolidate FileSystem Documentation [FLINK-12391] - Add timeout to transfer.sh [FLINK-12539] - StreamingFileSink: Make the class extendable to customize for different usecases Test [FLINK-12350] - RocksDBStateBackendTest doesn&#39;t cover the incremental checkpoint code path Task [FLINK-12460] - Change taskmanager.tmp.dirs to io.tmp.dirs in configuration docs `}),e.add({id:208,href:"/2019/06/26/a-practical-guide-to-broadcast-state-in-apache-flink/",title:"A Practical Guide to Broadcast State in Apache Flink",section:"Flink Blog",content:`Since version 1.5.0, Apache Flink features a new type of state which is called Broadcast State. In this post, we explain what Broadcast State is, and show an example of how it can be applied to an application that evaluates dynamic patterns on an event stream. 
We walk you through the processing steps and the source code to implement this application in practice.
 What is Broadcast State? # The Broadcast State can be used to combine and jointly process two streams of events in a specific way. The events of the first stream are broadcasted to all parallel instances of an operator, which maintains them as state. The events of the other stream are not broadcasted but sent to individual instances of the same operator and processed together with the events of the broadcasted stream. The new broadcast state is a natural fit for applications that need to join a low-throughput and a high-throughput stream or need to dynamically update their processing logic. We will use a concrete example of the latter use case to explain the broadcast state and show its API in more detail in the remainder of this post.
 Dynamic Pattern Evaluation with Broadcast State # Imagine an e-commerce website that captures the interactions of all users as a stream of user actions. The company that operates the website is interested in analyzing the interactions to increase revenue, improve the user experience, and detect and prevent malicious behavior. The website implements a streaming application that detects a pattern on the stream of user events. However, the company wants to avoid modifying and redeploying the application every time the pattern changes. Instead, the application ingests a second stream of patterns and updates its active pattern when it receives a new pattern from the pattern stream. In the following, we discuss this application step-by-step and show how it leverages the broadcast state feature in Apache Flink.
 Our example application ingests two data streams. The first stream provides user actions on the website and is illustrated on the top left side of the above figure. A user interaction event consists of the type of the action (user login, user logout, add to cart, or complete payment) and the id of the user, which is encoded by color. The user action event stream in our illustration contains a logout action of User 1001 followed by a payment-complete event for User 1003, and an “add-to-cart” action of User 1002.
@@ -4133,7 +4143,7 @@
 The broadcast state (read-write or read-only, depending on the method), A TimerService, which gives access to the record’s timestamp, the current watermark, and which can register timers, The current key (only available in processElement()), and A method to apply a function the keyed state of each registered key (only available in processBroadcastElement()) The KeyedBroadcastProcessFunction has full access to Flink state and time features just like any other ProcessFunction and hence can be used to implement sophisticated application logic. Broadcast state was designed to be a versatile feature that adapts to different scenarios and use cases. Although we only discussed a fairly simple and restricted application, you can use broadcast state in many ways to implement the requirements of your application.
 Conclusion # In this blog post, we walked you through an example application to explain what Apache Flink’s broadcast state is and how it can be used to evaluate dynamic patterns on event streams. We’ve also discussed the API and showed the source code of our example application.
 We invite you to check the documentation of this feature and provide feedback or suggestions for further improvements through our mailing list.
-`}),e.add({id:208,href:"/2019/06/05/a-deep-dive-into-flinks-network-stack/",title:"A Deep-Dive into Flink's Network Stack",section:"Flink Blog",content:` Flink’s network stack is one of the core components that make up the flink-runtime module and sit at the heart of every Flink job. It connects individual work units (subtasks) from all TaskManagers. This is where your streamed-in data flows through and it is therefore crucial to the performance of your Flink job for both the throughput as well as latency you observe. In contrast to the coordination channels between TaskManagers and JobManagers which are using RPCs via Akka, the network stack between TaskManagers relies on a much lower-level API using Netty.
+`}),e.add({id:209,href:"/2019/06/05/a-deep-dive-into-flinks-network-stack/",title:"A Deep-Dive into Flink's Network Stack",section:"Flink Blog",content:` Flink’s network stack is one of the core components that make up the flink-runtime module and sit at the heart of every Flink job. It connects individual work units (subtasks) from all TaskManagers. This is where your streamed-in data flows through and it is therefore crucial to the performance of your Flink job for both the throughput as well as latency you observe. In contrast to the coordination channels between TaskManagers and JobManagers which are using RPCs via Akka, the network stack between TaskManagers relies on a much lower-level API using Netty.
 This blog post is the first in a series of posts about the network stack. In the sections below, we will first have a high-level look at what abstractions are exposed to the stream operators and then go into detail on the physical implementation and various optimisations Flink did. We will briefly present the result of these optimisations and Flink’s trade-off between throughput and latency. Future blog posts in this series will elaborate more on monitoring and metrics, tuning parameters, and common anti-patterns.
 Logical View # Flink’s network stack provides the following logical view to the subtasks when communicating with each other, for example during a network shuffle as required by a keyBy().
 It abstracts over the different settings of the following three concepts:
@@ -4170,7 +4180,7 @@
 less synchronisation overhead (output flusher and RecordWriter are independent) in high-load scenarios where Netty is the bottleneck (either through backpressure or directly), we can still accumulate data in incomplete buffers significant reduction of Netty notifications However, you may notice an increased CPU use and TCP packet rate during low load scenarios. This is because, with the changes, Flink will use any available CPU cycles to try to maintain the desired latency. Once the load increases, this will self-adjust by buffers filling up more. High load scenarios are not affected and even get a better throughput because of the reduced synchronisation overhead. Buffer Builder &amp; Buffer Consumer # If you want to dig deeper into how the producer-consumer mechanics are implemented in Flink, please take a closer look at the BufferBuilder and BufferConsumer classes which have been introduced in Flink 1.5. While reading is potentially only per buffer, writing to it is per record and thus on the hot path for all network communication in Flink. Therefore, it was very clear to us that we needed a lightweight connection between the task’s thread and the Netty thread which does not imply too much synchronisation overhead. For further details, we suggest to check out the source code.
 Latency vs. Throughput # Network buffers were introduced to get higher resource utilisation and higher throughput at the cost of having some records wait in buffers a little longer. Although an upper limit to this wait time can be given via the buffer timeout, you may be curious to find out more about the trade-off between these two dimensions: latency and throughput, as, obviously, you cannot get both. The following plot shows various values for the buffer timeout starting at 0 (flush with every record) to 100ms (the default) and shows the resulting throughput rates on a cluster with 100 nodes and 8 slots each running a job that has no business logic and thus only tests the network stack. For comparison, we also plot Flink 1.4 before the low-latency improvements (as described above) were added. As you can see, with Flink 1.5+, even very low buffer timeouts such as 1ms (for low-latency scenarios) provide a maximum throughput as high as 75% of the default timeout where more data is buffered before being sent over the wire.
 Conclusion # Now you know about result partitions, the different network connections and scheduling types for both batch and streaming. You also know about credit-based flow control and how the network stack works internally, in order to reason about network-related tuning parameters and about certain job behaviours. Future blog posts in this series will build upon this knowledge and go into more operational details including relevant metrics to look at, further network stack tuning, and common antipatterns to avoid. Stay tuned for more.
-`}),e.add({id:209,href:"/2019/05/17/state-ttl-in-flink-1.8.0-how-to-automatically-cleanup-application-state-in-apache-flink/",title:"State TTL in Flink 1.8.0: How to Automatically Cleanup Application State in Apache Flink",section:"Flink Blog",content:`A common requirement for many stateful streaming applications is to automatically cleanup application state for effective management of your state size, or to control how long the application state can be accessed (e.g. due to legal regulations like the GDPR). The state time-to-live (TTL) feature was initiated in Flink 1.6.0 and enabled application state cleanup and efficient state size management in Apache Flink.
+`}),e.add({id:210,href:"/2019/05/17/state-ttl-in-flink-1.8.0-how-to-automatically-cleanup-application-state-in-apache-flink/",title:"State TTL in Flink 1.8.0: How to Automatically Cleanup Application State in Apache Flink",section:"Flink Blog",content:`A common requirement for many stateful streaming applications is to automatically cleanup application state for effective management of your state size, or to control how long the application state can be accessed (e.g. due to legal regulations like the GDPR). The state time-to-live (TTL) feature was initiated in Flink 1.6.0 and enabled application state cleanup and efficient state size management in Apache Flink.
 In this post, we motivate the State TTL feature and discuss its use cases. Moreover, we show how to use and configure it. We explain how Flink internally manages state with TTL and present some exciting additions to the feature in Flink 1.8.0. The blog post concludes with an outlook on future improvements and extensions.
 The Transient Nature of State # There are two major reasons why state should be maintained only for a limited time. For example, let’s imagine a Flink application that ingests a stream of user login events and stores for each user the time of the last login to improve the experience of frequent visitors.
 Controlling the size of state. Being able to efficiently manage an ever-growing state size is a primary use case for state TTL. Oftentimes, data needs to be persisted temporarily while there is some user activity around it, e.g. web sessions. When the activity ends there is no longer interest in that data while it still occupies storage. Flink 1.8.0 introduces background cleanup of old state based on TTL that makes the eviction of no-longer-necessary data frictionless. Previously, the application developer had to take extra actions and explicitly remove useless state to free storage space. This manual clean up procedure was not only error prone but also less efficient than the new lazy method to remove state. Following our previous example of storing the time of the last login, this might not be necessary after some time because the user can be treated as “infrequent” later on.
@@ -4199,7 +4209,7 @@
 Future work # Apart from including the timer-based cleanup strategy, mentioned above, the Flink community has plans to further improve the state TTL feature. The possible improvements include adding support of TTL for event time scale (only processing time is supported at the moment) and enabling State TTL for queryable state.
 We encourage you to join the conversation and share your thoughts and ideas in the Apache Flink JIRA board or by subscribing to the Apache Flink dev mailing list. Feedback or suggestions are always appreciated and we look forward to hearing your thoughts on the Flink mailing lists.
 Summary # Time-based state access restrictions and controlling the size of application state are common challenges in the world of stateful stream processing. Flink’s 1.8.0 release significantly improves the State TTL feature by adding support for continuous background cleanup of expired state objects. The new clean up mechanisms relieve you from manually implementing state cleanup. They are also more efficient due to their lazy nature. State TTL gives you control over the size of your application state so that you can focus on the core logic of your applications.
-`}),e.add({id:210,href:"/2019/05/14/flux-capacitor-huh-temporal-tables-and-joins-in-streaming-sql/",title:"Flux capacitor, huh? Temporal Tables and Joins in Streaming SQL",section:"Flink Blog",content:`Figuring out how to manage and model temporal data for effective point-in-time analysis was a longstanding battle, dating as far back as the early 80’s, that culminated with the introduction of temporal tables in the SQL standard in 2011. Up to that point, users were doomed to implement this as part of the application logic, often hurting the length of the development lifecycle as well as the maintainability of the code. And, although there isn’t a single, commonly accepted definition of temporal data, the challenge it represents is one and the same: how do we validate or enrich data against dynamically changing, historical datasets?
+`}),e.add({id:211,href:"/2019/05/14/flux-capacitor-huh-temporal-tables-and-joins-in-streaming-sql/",title:"Flux capacitor, huh? Temporal Tables and Joins in Streaming SQL",section:"Flink Blog",content:`Figuring out how to manage and model temporal data for effective point-in-time analysis was a longstanding battle, dating as far back as the early 80’s, that culminated with the introduction of temporal tables in the SQL standard in 2011. Up to that point, users were doomed to implement this as part of the application logic, often hurting the length of the development lifecycle as well as the maintainability of the code. And, although there isn’t a single, commonly accepted definition of temporal data, the challenge it represents is one and the same: how do we validate or enrich data against dynamically changing, historical datasets?
 For example: given a stream with Taxi Fare events tied to the local currency of the ride location, we might want to convert the fare price to a common currency for further processing. As conversion rates excel at fluctuating over time, each Taxi Fare event would need to be matched to the rate that was valid at the time the event occurred in order to produce a reliable result.
 Modelling Temporal Data with Flink # In the 1.7 release, Flink has introduced the concept of temporal tables into its streaming SQL and Table API: parameterized views on append-only tables — or, any table that only allows records to be inserted, never updated or deleted — that are interpreted as a changelog and keep data closely tied to time context, so that it can be interpreted as valid only within a specific period of time. Transforming a stream into a temporal table requires:
 Defining a primary key and a versioning field that can be used to keep track of the changes that happen over time;
@@ -4214,7 +4224,7 @@
 Narrowing the scope of the join: only the time-matching version of ratesHistory is visible for a given taxiFare.time; Pruning unneeded records from state: for cases using event time, records between current time and the watermark delay are persisted for both the probe and build side. These are discarded as soon as the watermark arrives and the results are emitted — allowing the join operation to move forward in time and the build table to “refresh” its version in state. Conclusion # All this means it is now possible to express continuous stream enrichment in relational and time-varying terms using Flink without dabbling into syntactic patchwork or compromising performance. In other words: stream time-travelling minus the flux capacitor. Extending this syntax to batch processing for enriching historic data with proper (event) time semantics is also part of the Flink roadmap!
 If you&rsquo;d like to get some hands-on practice in joining streams with Flink SQL (and Flink SQL in general), checkout this free training for Flink SQL. The training environment is based on Docker and set up in just a few minutes.
 Subscribe to the Apache Flink mailing lists to stay up-to-date with the latest developments in this space.
-`}),e.add({id:211,href:"/2019/05/03/when-flink-pulsar-come-together/",title:"When Flink & Pulsar Come Together",section:"Flink Blog",content:`The open source data technology frameworks Apache Flink and Apache Pulsar can integrate in different ways to provide elastic data processing at large scale. I recently gave a talk at Flink Forward San Francisco 2019 and presented some of the integrations between the two frameworks for batch and streaming applications. In this post, I will give a short introduction to Apache Pulsar and its differentiating elements from other messaging systems and describe the ways that Pulsar and Flink can work together to provide a seamless developer experience for elastic data processing at scale.
+`}),e.add({id:212,href:"/2019/05/03/when-flink-pulsar-come-together/",title:"When Flink & Pulsar Come Together",section:"Flink Blog",content:`The open source data technology frameworks Apache Flink and Apache Pulsar can integrate in different ways to provide elastic data processing at large scale. I recently gave a talk at Flink Forward San Francisco 2019 and presented some of the integrations between the two frameworks for batch and streaming applications. In this post, I will give a short introduction to Apache Pulsar and its differentiating elements from other messaging systems and describe the ways that Pulsar and Flink can work together to provide a seamless developer experience for elastic data processing at scale.
 A brief introduction to Apache Pulsar # Apache Pulsar is an open-source distributed pub-sub messaging system under the stewardship of the Apache Software Foundation. Pulsar is a multi-tenant, high-performance solution for server-to-server messaging including multiple features such as native support for multiple clusters in a Pulsar instance, with seamless geo-replication of messages across clusters, very low publish and end-to-end latency, seamless scalability to over a million topics, and guaranteed message delivery with persistent message storage provided by Apache BookKeeper among others. Let’s now discuss the primary differentiators between Pulsar and other pub-sub messaging frameworks:
 The first differentiating factor stems from the fact that although Pulsar provides a flexible pub-sub messaging system it is also backed by durable log storage — hence combining both messaging and storage under one framework. Because of that layered architecture, Pulsar provides instant failure recovery, independent scalability and balance-free cluster expansion.
 Pulsar’s architecture follows a similar pattern to other pub-sub systems as the framework is organized in topics as the main data entity, with producers sending data to, and consumers receiving data from a topic as shown in the diagram below.
@@ -4230,7 +4240,7 @@
 // create and configure Pulsar consumer PulsarSourceBuilder&lt;String&gt;builder = PulsarSourceBuilder .builder(new SimpleStringSchema()) .serviceUrl(serviceUrl) .topic(inputTopic) .subscriptionName(subscription); SourceFunction&lt;String&gt; src = builder.build(); // ingest DataStream with Pulsar consumer DataStream&lt;String&gt; words = env.addSource(src); // perform computation on DataStream (here a simple WordCount) DataStream&lt;WordWithCount&gt; wc = words .flatMap((FlatMapFunction&lt;String, WordWithCount&gt;) (word, collector) -&gt; { collector.collect(new WordWithCount(word, 1)); }) .returns(WordWithCount.class) .keyBy(&#34;word&#34;) .timeWindow(Time.seconds(5)) .reduce((ReduceFunction&lt;WordWithCount&gt;) (c1, c2) -&gt; new WordWithCount(c1.word, c1.count + c2.count)); // emit result via Pulsar producer wc.addSink(new FlinkPulsarProducer&lt;&gt;( serviceUrl, outputTopic, new AuthenticationDisabled(), wordWithCount -&gt; wordWithCount.toString().getBytes(UTF_8), wordWithCount -&gt; wordWithCount.word) ); Another integration between the two frameworks that developers can take advantage of includes using Pulsar as both a streaming source and a streaming table sink for Flink SQL or Table API queries as shown in the example below:
 // obtain a DataStream with words DataStream&lt;String&gt; words = ... // register DataStream as Table &#34;words&#34; with two attributes (&#34;word&#34;, &#34;ts&#34;). // &#34;ts&#34; is an event-time timestamp. tableEnvironment.registerDataStream(&#34;words&#34;, words, &#34;word, ts.rowtime&#34;); // create a TableSink that produces to Pulsar TableSink sink = new PulsarJsonTableSink( serviceUrl, outputTopic, new AuthenticationDisabled(), ROUTING_KEY); // register Pulsar TableSink as table &#34;wc&#34; tableEnvironment.registerTableSink( &#34;wc&#34;, sink.configure( new String[]{&#34;word&#34;, &#34;cnt&#34;}, new TypeInformation[]{Types.STRING, Types.LONG})); // count words per 5 seconds and write result to table &#34;wc&#34; tableEnvironment.sqlUpdate( &#34;INSERT INTO wc &#34; + &#34;SELECT word, COUNT(*) AS cnt &#34; + &#34;FROM words &#34; + &#34;GROUP BY word, TUMBLE(ts, INTERVAL &#39;5&#39; SECOND)&#34;); Finally, Flink integrates with Pulsar for batch workloads as a batch sink where all results get pushed to Pulsar after Apache Flink has completed the computation in a static data set. Such an example is shown below:
 // obtain DataSet from arbitrary computation DataSet&lt;WordWithCount&gt; wc = ... // create PulsarOutputFormat instance OutputFormat pulsarOutputFormat = new PulsarOutputFormat( serviceUrl, topic, new AuthenticationDisabled(), wordWithCount -&gt; wordWithCount.toString().getBytes()); // write DataSet to Pulsar wc.output(pulsarOutputFormat); Conclusion # Both Pulsar and Flink share a similar view on how the data and the computation level of an application can be “streaming-first” with batch as a special case streaming. With Pulsar’s Segmented Streams approach and Flink’s steps to unify batch and stream processing workloads under one framework, there are numerous ways of integrating the two technologies together to provide elastic data processing at massive scale. Subscribe to the Apache Flink and Apache Pulsar mailing lists to stay up-to-date with the latest developments in this space or share your thoughts and recommendations with both communities.
-`}),e.add({id:212,href:"/2019/04/17/apache-flinks-application-to-season-of-docs/",title:"Apache Flink's Application to Season of Docs",section:"Flink Blog",content:`The Apache Flink community is happy to announce its application to the first edition of Season of Docs by Google. The program is bringing together Open Source projects and technical writers to raise awareness for and improve documentation of Open Source projects. While the community is continuously looking for new contributors to collaborate on our documentation, we would like to take this chance to work with one or two technical writers to extend and restructure parts of our documentation (details below).
+`}),e.add({id:213,href:"/2019/04/17/apache-flinks-application-to-season-of-docs/",title:"Apache Flink's Application to Season of Docs",section:"Flink Blog",content:`The Apache Flink community is happy to announce its application to the first edition of Season of Docs by Google. The program is bringing together Open Source projects and technical writers to raise awareness for and improve documentation of Open Source projects. While the community is continuously looking for new contributors to collaborate on our documentation, we would like to take this chance to work with one or two technical writers to extend and restructure parts of our documentation (details below).
 The community has discussed this opportunity on the dev mailinglist and agreed on three project ideas to submit to the program. We have a great team of mentors (Stephan, Fabian, David, Jark &amp; Konstantin) lined up and are very much looking forward to the first proposals by potential technical writers (given we are admitted to the program ;)). In case of questions feel free to reach out to the community via dev@flink.apache.org.
 Project Ideas List # Project 1: Improve Documentation of Stream Processing Concepts # Description: Stream processing is the processing of data in motion―in other words, computing on data directly as it is produced or received. Apache Flink has pioneered the field of distributed, stateful stream processing over the last several years. As the community has pushed the boundaries of stream processing, we have introduced new concepts that users need to become familiar with to develop and operate Apache Flink applications efficiently. The Apache Flink documentation [1] already contains a “concepts” section, but it is a ) incomplete and b) lacks an overall structure &amp; reading flow. In addition, “concepts”-content is also spread over the development [2] &amp; operations [3] documentation without references to the “concepts” section. An example of this can be found in [4] and [5].
 In this project, we would like to restructure, consolidate and extend the concepts documentation for Apache Flink to better guide users who want to become productive as quickly as possible. This includes better conceptual introductions to topics such as event time, state, and fault tolerance with proper linking to and from relevant deployment and development guides.
@@ -4241,7 +4251,7 @@
 //nightlies.apache.org/flink/flink-docs-release-1.8/ops //nightlies.apache.org/flink/flink-docs-release-1.8/monitoring Project 3: Improve Documentation for Relational APIs (Table API &amp; SQL) # Description: Apache Flink features APIs at different levels of abstraction which enables its users to trade conciseness for expressiveness. Flink’s relational APIs, SQL and the Table API, are “younger” than the DataStream and DataSet APIs, more high-level and focus on data analytics use cases. A core principle of Flink’s SQL and Table API is that they can be used to process static (batch) and continuous (streaming) data and that a program or query produces the same result in both cases. The documentation of Flink’s relational APIs has organically grown and can be improved in a few areas. There are several on-going development efforts (e.g. Hive Integration, Python Support or Support for Interactive Programming) that aim to extend the scope of the Table API and SQL.
 The existing documentation could be reorganized to prepare for covering the new features. Moreover, it could be improved by adding a concepts section that describes the use cases and internals of the APIs in more detail. Moreover, the documentation of built-in functions could be improved by adding more concrete examples.
 Related material:
-Table API &amp; SQL docs main page Built-in functions Concepts Streaming Concepts `}),e.add({id:213,href:"/2019/04/09/apache-flink-1.8.0-release-announcement/",title:"Apache Flink 1.8.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is pleased to announce Apache Flink 1.8.0. The latest release includes more than 420 resolved issues and some exciting additions to Flink that we describe in the following sections of this post. Please check the complete changelog for more details.
+Table API &amp; SQL docs main page Built-in functions Concepts Streaming Concepts `}),e.add({id:214,href:"/2019/04/09/apache-flink-1.8.0-release-announcement/",title:"Apache Flink 1.8.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is pleased to announce Apache Flink 1.8.0. The latest release includes more than 420 resolved issues and some exciting additions to Flink that we describe in the following sections of this post. Please check the complete changelog for more details.
 Flink 1.8.0 is API-compatible with previous 1.x.y releases for APIs annotated with the @Public annotation. The release is available now and we encourage everyone to download the release and check out the updated documentation. Feedback through the Flink mailing lists or JIRA is, as always, very much appreciated!
 You can find the binaries on the updated Downloads page on the Flink project site.
 With Flink 1.8.0 we come closer to our goals of enabling fast data processing and building data-intensive applications for the Flink community in a seamless way. We do this by cleaning up and refactoring Flink under the hood to allow more efficient feature development in the future. This includes removal of the legacy runtime components that were subsumed in the major rework of Flink&rsquo;s underlying distributed system architecture (FLIP-6) as well as refactorings on the Table API that prepare it for the future addition of the Blink enhancements (FLINK-11439).
@@ -4270,7 +4280,7 @@
 Known Issues # Discarded checkpoint can cause Tasks to fail (FLINK-11662): There is a race condition that can lead to erroneous checkpoint failures. This mostly occurs when restarting from a savepoint or checkpoint takes a long time at the sources of a job. If you see random checkpointing failures that don&rsquo;t seem to have a good explanation you might be affected. Please see the Jira issue for more details and a workaround for the problem. Release Notes # Please review the release notes for a more detailed list of changes and new features if you plan to upgrade your Flink setup to Flink 1.8.
 List of Contributors # We would like to acknowledge all community members for contributing to this release. Special credits go to the following members for contributing to the 1.8.0 release (according to git log --pretty=&quot;%an&quot; release-1.7.0..release-1.8.0 | sort | uniq without manual deduplication):
 Addison Higham, Aitozi, Aleksey Pak, Alexander Fedulov, Alexey Trenikhin, Aljoscha Krettek, Andrey Zagrebin, Artsem Semianenka, Asura7969, Avi, Barisa Obradovic, Benchao Li, Bo WANG, Chesnay Schepler, Congxian Qiu, Cristian, David Anderson, Dawid Wysakowicz, Dian Fu, DuBin, EAlexRojas, EronWright, Eugen Yushin, Fabian Hueske, Fokko Driesprong, Gary Yao, Hequn Cheng, Igal Shilman, Jamie Grier, JaryZhen, Jeff Zhang, Jihyun Cho, Jinhu Wu, Joerg Schad, KarmaGYZ, Kezhu Wang, Konstantin Knauf, Kostas Kloudas, Lakshmi, Lakshmi Gururaja Rao, Lavkesh Lahngir, Li, Shuangjiang, Mai Nakagawa, Matrix42, Matt, Maximilian Michels, Mododo, Nico Kruber, Paul Lin, Piotr Nowojski, Qi Yu, Qin, Robert, Robert Metzger, Romano Vacca, Rong Rong, Rune Skou Larsen, Seth Wiesman, Shannon Carey, Shimin Yang, Shuyi Chen, Stefan Richter, Stephan Ewen, SuXingLee, TANG Wen-hui, Tao Yang, Thomas Weise, Till Rohrmann, Timo Walther, Tom Goong, Tony Feng, Tony Wei, Tzu-Li (Gordon) Tai, Tzu-Li Chen, Ufuk Celebi, Xingcan Cui, Xpray, XuQianJin-Stars, Xue Yu, Yangze Guo, Ying Xu, Yiqun Lin, Yu Li, Yuanyang Wu, Yun Tang, ZILI CHEN, Zhanchun Zhang, Zhijiang, ZiLi Chen, acqua.csq, alex04.wang, ap, azagrebin, blueszheng, boshu Zheng, chengjie.wu, chensq, chummyhe89, eaglewatcherwb, hequn8128, ifndef-SleePy, intsmaze, jackyyin, jinhu.wjh, jparkie, jrthe42, junsheng.wu, kgorman, kkloudas, kkolman, klion26, lamber-ken, leesf, libenchao, lining, liuzhaokun, lzh3636, maqingxiang, mb-datadome, okidogi, park.yq, sunhaibotb, sunjincheng121, tison, unknown, vinoyang, wenhuitang, wind, xueyu, xuqianjin, yanghua, zentol, zhangzhanchun, zhijiang, zhuzhu.zz, zy, 仲炜, 砚田, 谢磊
-`}),e.add({id:214,href:"/2019/03/11/flink-and-prometheus-cloud-native-monitoring-of-streaming-applications/",title:"Flink and Prometheus: Cloud-native monitoring of streaming applications",section:"Flink Blog",content:`This blog post describes how developers can leverage Apache Flink&rsquo;s built-in metrics system together with Prometheus to observe and monitor streaming applications in an effective way. This is a follow-up post from my Flink Forward Berlin 2018 talk (slides, video). We will cover some basic Prometheus concepts and why it is a great fit for monitoring Apache Flink stream processing jobs. There is also an example to showcase how you can utilize Prometheus with Flink to gain insights into your applications and be alerted on potential degradations of your Flink jobs.
+`}),e.add({id:215,href:"/2019/03/11/flink-and-prometheus-cloud-native-monitoring-of-streaming-applications/",title:"Flink and Prometheus: Cloud-native monitoring of streaming applications",section:"Flink Blog",content:`This blog post describes how developers can leverage Apache Flink&rsquo;s built-in metrics system together with Prometheus to observe and monitor streaming applications in an effective way. This is a follow-up post from my Flink Forward Berlin 2018 talk (slides, video). We will cover some basic Prometheus concepts and why it is a great fit for monitoring Apache Flink stream processing jobs. There is also an example to showcase how you can utilize Prometheus with Flink to gain insights into your applications and be alerted on potential degradations of your Flink jobs.
 Why Prometheus? # Prometheus is a metrics-based monitoring system that was originally created in 2012. The system is completely open-source (under the Apache License 2) with a vibrant community behind it and it has graduated from the Cloud Native Foundation last year – a sign of maturity, stability and production-readiness. As we mentioned, the system is based on metrics and it is designed to measure the overall health, behavior and performance of a service. Prometheus features a multi-dimensional data model as well as a flexible query language. It is designed for reliability and can easily be deployed in traditional or containerized environments. Some of the important Prometheus concepts are:
 Metrics: Prometheus defines metrics as floats of information that change in time. These time series have millisecond precision.
 Labels are the key-value pairs associated with time series that support Prometheus&rsquo; flexible and powerful data model – in contrast to hierarchical data structures that one might experience with traditional metrics systems.
@@ -4293,7 +4303,7 @@
 Example alert in Prometheus web UI. In real-world situations alerts like this one can be routed through a component called Alertmanager and be grouped into notifications to systems like email, PagerDuty or Slack.
 Go ahead and play around with the setup, and check out the Grafana instance reachable at http://localhost:3000 (credentials admin:flink) for visualizing Prometheus metrics. If there are any questions or problems, feel free to create an issue. Once finished, do not forget to tear down the setup via
 ./gradlew composeDown Conclusion # Using Prometheus together with Flink provides an easy way for effective monitoring and alerting of your Flink jobs. Both projects have exciting and vibrant communities behind them with new developments and additions scheduled for upcoming releases. We encourage you to try the two technologies together as it has immensely improved our insights into Flink jobs running in production.
-`}),e.add({id:215,href:"/2019/03/06/what-to-expect-from-flink-forward-san-francisco-2019/",title:"What to expect from Flink Forward San Francisco 2019",section:"Flink Blog",content:`The third annual Flink Forward San Francisco is just a few weeks away! As always, Flink Forward will be the right place to meet and mingle with experienced Flink users, contributors, and committers. Attendees will hear and chat about the latest developments around Flink and learn from technical deep-dive sessions and exciting use cases that were put into production with Flink. The event will take place on April 1-2, 2019 at Hotel Nikko in San Francisco. The program committee assembled an amazing lineup of speakers who will cover many different aspects of Apache Flink and stream processing.
+`}),e.add({id:216,href:"/2019/03/06/what-to-expect-from-flink-forward-san-francisco-2019/",title:"What to expect from Flink Forward San Francisco 2019",section:"Flink Blog",content:`The third annual Flink Forward San Francisco is just a few weeks away! As always, Flink Forward will be the right place to meet and mingle with experienced Flink users, contributors, and committers. Attendees will hear and chat about the latest developments around Flink and learn from technical deep-dive sessions and exciting use cases that were put into production with Flink. The event will take place on April 1-2, 2019 at Hotel Nikko in San Francisco. The program committee assembled an amazing lineup of speakers who will cover many different aspects of Apache Flink and stream processing.
 Some highlights of the program are:
 Realtime Store Visit Predictions at Scale: Luca Giovagnoli from Yelp will talk about a &ldquo;multidisciplinary&rdquo; Flink application that combines geospatial clustering algorithms, Machine Learning models, and cutting-edge stream-processing technology.
 Real-time Processing with Flink for Machine Learning at Netflix: Elliot Chow will discuss the practical aspects of using Apache Flink to power Machine Learning algorithms for video recommendations, search results ranking, and selection of artwork images at Netflix.
@@ -4307,13 +4317,13 @@
 Troubleshooting and Operating Flink at large scale: In this training, we will focus on everything you need to run Apache Flink applications reliably and efficiently in production including topics like capacity planning, monitoring, troubleshooting and tuning Apache Flink.
 If you haven&rsquo;t done so yet, check out the full schedule and register your attendance. I&rsquo;m looking forward to meet you at Flink Forward San Francisco.
 Fabian
-`}),e.add({id:216,href:"/2019/02/25/apache-flink-1.6.4-released/",title:"Apache Flink 1.6.4 Released",section:"Flink Blog",content:`The Apache Flink community released the fourth bugfix version of the Apache Flink 1.6 series.
+`}),e.add({id:217,href:"/2019/02/25/apache-flink-1.6.4-released/",title:"Apache Flink 1.6.4 Released",section:"Flink Blog",content:`The Apache Flink community released the fourth bugfix version of the Apache Flink 1.6 series.
 This release includes more than 25 fixes and minor improvements for Flink 1.6.3. The list below includes a detailed list of all fixes.
 We highly recommend all users to upgrade to Flink 1.6.4.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.6.4&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.6.4&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.6.4&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Bug [FLINK-10721] - Kafka discovery-loop exceptions may be swallowed [FLINK-10761] - MetricGroup#getAllVariables can deadlock [FLINK-10774] - connection leak when partition discovery is disabled and open throws exception [FLINK-10848] - Flink&#39;s Yarn ResourceManager can allocate too many excess containers [FLINK-11022] - Update LICENSE and NOTICE files for older releases [FLINK-11071] - Dynamic proxy classes cannot be resolved when deserializing job graph [FLINK-11084] - Incorrect ouput after two consecutive split and select [FLINK-11119] - Incorrect Scala example for Table Function [FLINK-11134] - Invalid REST API request should not log the full exception in Flink logs [FLINK-11151] - FileUploadHandler stops working if the upload directory is removed [FLINK-11173] - Proctime attribute validation throws an incorrect exception message [FLINK-11224] - Log is missing in scala-shell [FLINK-11232] - Empty Start Time of sub-task on web dashboard [FLINK-11234] - ExternalTableCatalogBuilder unable to build a batch-only table [FLINK-11235] - Elasticsearch connector leaks threads if no connection could be established [FLINK-11251] - Incompatible metric name on prometheus reporter [FLINK-11389] - Incorrectly use job information when call getSerializedTaskInformation in class TaskDeploymentDescriptor [FLINK-11584] - ConfigDocsCompletenessITCase fails DescriptionBuilder#linebreak() is used [FLINK-11585] - Prefix matching in ConfigDocsGenerator can result in wrong assignments Improvement [FLINK-10910] - Harden Kubernetes e2e test [FLINK-11079] - Skip deployment for flnk-storm-examples [FLINK-11207] - Update Apache commons-compress from 1.4.1 to 1.18 [FLINK-11262] - Bump jython-standalone to 2.7.1 [FLINK-11289] - Rework example module structure to account for licensing [FLINK-11304] - Typo in time attributes doc [FLINK-11469] - fix Tuning Checkpoints and Large State doc `}),e.add({id:217,href:"/2019/02/21/monitoring-apache-flink-applications-101/",title:"Monitoring Apache 
Flink Applications 101",section:"Flink Blog",content:` This blog post provides an introduction to Apache Flink’s built-in monitoring and metrics system, that allows developers to effectively monitor their Flink jobs. Oftentimes, the task of picking the relevant metrics to monitor a Flink application can be overwhelming for a DevOps team that is just starting with stream processing and Apache Flink. Having worked with many organizations that deploy Flink at scale, I would like to share my experience and some best practice with the community.
+Bug [FLINK-10721] - Kafka discovery-loop exceptions may be swallowed [FLINK-10761] - MetricGroup#getAllVariables can deadlock [FLINK-10774] - connection leak when partition discovery is disabled and open throws exception [FLINK-10848] - Flink&#39;s Yarn ResourceManager can allocate too many excess containers [FLINK-11022] - Update LICENSE and NOTICE files for older releases [FLINK-11071] - Dynamic proxy classes cannot be resolved when deserializing job graph [FLINK-11084] - Incorrect ouput after two consecutive split and select [FLINK-11119] - Incorrect Scala example for Table Function [FLINK-11134] - Invalid REST API request should not log the full exception in Flink logs [FLINK-11151] - FileUploadHandler stops working if the upload directory is removed [FLINK-11173] - Proctime attribute validation throws an incorrect exception message [FLINK-11224] - Log is missing in scala-shell [FLINK-11232] - Empty Start Time of sub-task on web dashboard [FLINK-11234] - ExternalTableCatalogBuilder unable to build a batch-only table [FLINK-11235] - Elasticsearch connector leaks threads if no connection could be established [FLINK-11251] - Incompatible metric name on prometheus reporter [FLINK-11389] - Incorrectly use job information when call getSerializedTaskInformation in class TaskDeploymentDescriptor [FLINK-11584] - ConfigDocsCompletenessITCase fails DescriptionBuilder#linebreak() is used [FLINK-11585] - Prefix matching in ConfigDocsGenerator can result in wrong assignments Improvement [FLINK-10910] - Harden Kubernetes e2e test [FLINK-11079] - Skip deployment for flnk-storm-examples [FLINK-11207] - Update Apache commons-compress from 1.4.1 to 1.18 [FLINK-11262] - Bump jython-standalone to 2.7.1 [FLINK-11289] - Rework example module structure to account for licensing [FLINK-11304] - Typo in time attributes doc [FLINK-11469] - fix Tuning Checkpoints and Large State doc `}),e.add({id:218,href:"/2019/02/21/monitoring-apache-flink-applications-101/",title:"Monitoring Apache 
Flink Applications 101",section:"Flink Blog",content:` This blog post provides an introduction to Apache Flink’s built-in monitoring and metrics system, that allows developers to effectively monitor their Flink jobs. Oftentimes, the task of picking the relevant metrics to monitor a Flink application can be overwhelming for a DevOps team that is just starting with stream processing and Apache Flink. Having worked with many organizations that deploy Flink at scale, I would like to share my experience and some best practice with the community.
 With business-critical applications running on Apache Flink, performance monitoring becomes an increasingly important part of a successful production deployment. It ensures that any degradation or downtime is immediately identified and resolved as quickly as possible.
 Monitoring goes hand-in-hand with observability, which is a prerequisite for troubleshooting and performance tuning. Nowadays, with the complexity of modern enterprise applications and the speed of delivery increasing, an engineering team must understand and have a complete overview of its applications’ status at any given point in time.
 Flink’s Metrics System # The foundation for monitoring Flink jobs is its metrics system which consists of two components; Metrics and MetricsReporters.
@@ -4366,13 +4376,13 @@
 TaskManager & JobManager CPU load. System Resources # In addition to the JVM metrics above, it is also possible to use Flink’s metrics system to gather insights about system resources, i.e. memory, CPU &amp; network-related metrics for the whole machine as opposed to the Flink processes alone. System resource monitoring is disabled by default and requires additional dependencies on the classpath. Please check out the Flink system resource metrics documentation for additional guidance and details. System resource monitoring in Flink can be very helpful in setups without existing host monitoring capabilities.
 Conclusion # This post tries to shed some light on Flink’s metrics and monitoring system. You can utilise it as a starting point when you first think about how to successfully monitor your Flink application. I highly recommend to start monitoring your Flink application early on in the development phase. This way you will be able to improve your dashboards and alerts over time and, more importantly, observe the performance impact of the changes to your application throughout the development phase. By doing so, you can ask the right questions about the runtime behaviour of your application, and learn much more about Flink’s internals early on.
 Last but not least, this post only scratches the surface of the overall metrics and monitoring capabilities of Apache Flink. I highly recommend going over Flink’s metrics documentation for a full reference of Flink’s metrics system.
-`}),e.add({id:218,href:"/2019/02/15/apache-flink-1.7.2-released/",title:"Apache Flink 1.7.2 Released",section:"Flink Blog",content:`The Apache Flink community released the second bugfix version of the Apache Flink 1.7 series.
+`}),e.add({id:219,href:"/2019/02/15/apache-flink-1.7.2-released/",title:"Apache Flink 1.7.2 Released",section:"Flink Blog",content:`The Apache Flink community released the second bugfix version of the Apache Flink 1.7 series.
 This release includes more than 40 fixes and minor improvements for Flink 1.7.1, covering several critical recovery issues as well as problems in the Flink streaming connectors.
 The list below includes a detailed list of all fixes. We highly recommend all users to upgrade to Flink 1.7.2.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.7.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.7.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.7.2&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-11179] - JoinCancelingITCase#testCancelSortMatchWhileDoingHeavySorting test error [FLINK-11180] - ProcessFailureCancelingITCase#testCancelingOnProcessFailure [FLINK-11181] - SimpleRecoveryITCaseBase test error Bug [FLINK-10721] - Kafka discovery-loop exceptions may be swallowed [FLINK-10761] - MetricGroup#getAllVariables can deadlock [FLINK-10774] - connection leak when partition discovery is disabled and open throws exception [FLINK-10848] - Flink&#39;s Yarn ResourceManager can allocate too many excess containers [FLINK-11046] - ElasticSearch6Connector cause thread blocked when index failed with retry [FLINK-11071] - Dynamic proxy classes cannot be resolved when deserializing job graph [FLINK-11083] - CRowSerializerConfigSnapshot is not instantiable [FLINK-11084] - Incorrect ouput after two consecutive split and select [FLINK-11100] - Presto S3 FileSystem E2E test broken [FLINK-11119] - Incorrect Scala example for Table Function [FLINK-11134] - Invalid REST API request should not log the full exception in Flink logs [FLINK-11145] - Fix Hadoop version handling in binary release script [FLINK-11151] - FileUploadHandler stops working if the upload directory is removed [FLINK-11168] - LargePlanTest times out on Travis [FLINK-11173] - Proctime attribute validation throws an incorrect exception message [FLINK-11187] - StreamingFileSink with S3 backend transient socket timeout issues [FLINK-11191] - Exception in code generation when ambiguous columns in MATCH_RECOGNIZE [FLINK-11194] - missing Scala 2.12 build of HBase connector [FLINK-11201] - Document SBT dependency requirements when using MiniClusterResource [FLINK-11224] - Log is missing in scala-shell [FLINK-11227] - The DescriptorProperties contains some bounds checking errors [FLINK-11232] - Empty Start Time of sub-task on web dashboard [FLINK-11234] - ExternalTableCatalogBuilder unable to build a batch-only table [FLINK-11235] - Elasticsearch connector leaks threads if no connection could be 
established [FLINK-11246] - Fix distinct AGG visibility issues [FLINK-11251] - Incompatible metric name on prometheus reporter [FLINK-11279] - Invalid week interval parsing in ExpressionParser [FLINK-11302] - FlinkS3FileSystem uses an incorrect path for temporary files. [FLINK-11389] - Incorrectly use job information when call getSerializedTaskInformation in class TaskDeploymentDescriptor [FLINK-11419] - StreamingFileSink fails to recover after taskmanager failure [FLINK-11436] - Java deserialization failure of the AvroSerializer when used in an old CompositeSerializers New Feature [FLINK-10457] - Support SequenceFile for StreamingFileSink Improvement [FLINK-10910] - Harden Kubernetes e2e test [FLINK-11023] - Update LICENSE and NOTICE files for flink-connectors [FLINK-11079] - Skip deployment for flink-storm-examples [FLINK-11207] - Update Apache commons-compress from 1.4.1 to 1.18 [FLINK-11216] - Back to top button is missing in the Joining document and is not properly placed in the Process Function document [FLINK-11262] - Bump jython-standalone to 2.7.1 [FLINK-11289] - Rework example module structure to account for licensing [FLINK-11304] - Typo in time attributes doc [FLINK-11331] - Fix errors in tableApi.md and functions.md [FLINK-11469] - fix Tuning Checkpoints and Large State doc [FLINK-11473] - Clarify Documenation on Latency Tracking [FLINK-11628] - Cache maven on travis `}),e.add({id:219,href:"/2019/02/13/batch-as-a-special-case-of-streaming-and-alibabas-contribution-of-blink/",title:"Batch as a Special Case of Streaming and Alibaba's contribution of Blink",section:"Flink Blog",content:`Last week, we broke the news that Alibaba decided to contribute its Flink-fork, called Blink, back to the Apache Flink project. Why is that a big thing for Flink, what will it mean for users and the community, and how does it fit into Flink’s overall vision? Let&rsquo;s take a step back to understand this better&hellip;
+Sub-task [FLINK-11179] - JoinCancelingITCase#testCancelSortMatchWhileDoingHeavySorting test error [FLINK-11180] - ProcessFailureCancelingITCase#testCancelingOnProcessFailure [FLINK-11181] - SimpleRecoveryITCaseBase test error Bug [FLINK-10721] - Kafka discovery-loop exceptions may be swallowed [FLINK-10761] - MetricGroup#getAllVariables can deadlock [FLINK-10774] - connection leak when partition discovery is disabled and open throws exception [FLINK-10848] - Flink&#39;s Yarn ResourceManager can allocate too many excess containers [FLINK-11046] - ElasticSearch6Connector cause thread blocked when index failed with retry [FLINK-11071] - Dynamic proxy classes cannot be resolved when deserializing job graph [FLINK-11083] - CRowSerializerConfigSnapshot is not instantiable [FLINK-11084] - Incorrect ouput after two consecutive split and select [FLINK-11100] - Presto S3 FileSystem E2E test broken [FLINK-11119] - Incorrect Scala example for Table Function [FLINK-11134] - Invalid REST API request should not log the full exception in Flink logs [FLINK-11145] - Fix Hadoop version handling in binary release script [FLINK-11151] - FileUploadHandler stops working if the upload directory is removed [FLINK-11168] - LargePlanTest times out on Travis [FLINK-11173] - Proctime attribute validation throws an incorrect exception message [FLINK-11187] - StreamingFileSink with S3 backend transient socket timeout issues [FLINK-11191] - Exception in code generation when ambiguous columns in MATCH_RECOGNIZE [FLINK-11194] - missing Scala 2.12 build of HBase connector [FLINK-11201] - Document SBT dependency requirements when using MiniClusterResource [FLINK-11224] - Log is missing in scala-shell [FLINK-11227] - The DescriptorProperties contains some bounds checking errors [FLINK-11232] - Empty Start Time of sub-task on web dashboard [FLINK-11234] - ExternalTableCatalogBuilder unable to build a batch-only table [FLINK-11235] - Elasticsearch connector leaks threads if no connection could be 
established [FLINK-11246] - Fix distinct AGG visibility issues [FLINK-11251] - Incompatible metric name on prometheus reporter [FLINK-11279] - Invalid week interval parsing in ExpressionParser [FLINK-11302] - FlinkS3FileSystem uses an incorrect path for temporary files. [FLINK-11389] - Incorrectly use job information when call getSerializedTaskInformation in class TaskDeploymentDescriptor [FLINK-11419] - StreamingFileSink fails to recover after taskmanager failure [FLINK-11436] - Java deserialization failure of the AvroSerializer when used in an old CompositeSerializers New Feature [FLINK-10457] - Support SequenceFile for StreamingFileSink Improvement [FLINK-10910] - Harden Kubernetes e2e test [FLINK-11023] - Update LICENSE and NOTICE files for flink-connectors [FLINK-11079] - Skip deployment for flink-storm-examples [FLINK-11207] - Update Apache commons-compress from 1.4.1 to 1.18 [FLINK-11216] - Back to top button is missing in the Joining document and is not properly placed in the Process Function document [FLINK-11262] - Bump jython-standalone to 2.7.1 [FLINK-11289] - Rework example module structure to account for licensing [FLINK-11304] - Typo in time attributes doc [FLINK-11331] - Fix errors in tableApi.md and functions.md [FLINK-11469] - fix Tuning Checkpoints and Large State doc [FLINK-11473] - Clarify Documenation on Latency Tracking [FLINK-11628] - Cache maven on travis `}),e.add({id:220,href:"/2019/02/13/batch-as-a-special-case-of-streaming-and-alibabas-contribution-of-blink/",title:"Batch as a Special Case of Streaming and Alibaba's contribution of Blink",section:"Flink Blog",content:`Last week, we broke the news that Alibaba decided to contribute its Flink-fork, called Blink, back to the Apache Flink project. Why is that a big thing for Flink, what will it mean for users and the community, and how does it fit into Flink’s overall vision? Let&rsquo;s take a step back to understand this better&hellip;
 A Unified Approach to Batch and Streaming # Since its early days, Apache Flink has followed the philosophy of taking a unified approach to batch and streaming data processing. The core building block is &ldquo;continuous processing of unbounded data streams&rdquo;: if you can do that, you can also do offline processing of bounded data sets (batch processing use cases), because these are just streams that happen to end at some point.
 The &ldquo;streaming first, with batch as a special case of streaming&rdquo; philosophy is supported by various projects (for example Flink, Beam, etc.) and often been cited as a powerful way to build data applications that generalize across real-time and offline processing and to help greatly reduce the complexity of data infrastructures.
 Why are there still batch processors? # However, &ldquo;batch is just a special case of streaming&rdquo; does not mean that any stream processor is now the right tool for your batch processing use cases - the introduction of stream processors did not render batch processors obsolete:
@@ -4413,25 +4423,25 @@
 The extended catalog support, DDL support, as well as support for Hive’s catalog and integrations is currently going through separate design discussions. We plan to leverage existing code here whenever it makes sense.
 Summary # We believe that the data processing stack of the future is based on stream processing: The elegance of stream processing with its ability to model offline processing (batch), real-time data processing, and event-driven applications in the same way, while offering high performance and consistency is simply too compelling.
 Exploiting certain properties of bounded data is important for a stream processor to achieve the same performance as dedicated batch processors. While Flink always supported batch processing, the project is taking the next step in building a unified runtime and towards becoming a stream processor that is competitive with batch processing systems even on their home turf: OLAP SQL. The contribution of Alibaba’s Blink code helps the Flink community to pick up the speed on this development.
-`}),e.add({id:220,href:"/2018/12/26/apache-flink-1.5.6-released/",title:"Apache Flink 1.5.6 Released",section:"Flink Blog",content:`The Apache Flink community released the sixth and last bugfix version of the Apache Flink 1.5 series.
+`}),e.add({id:221,href:"/2018/12/26/apache-flink-1.5.6-released/",title:"Apache Flink 1.5.6 Released",section:"Flink Blog",content:`The Apache Flink community released the sixth and last bugfix version of the Apache Flink 1.5 series.
 This release includes more than 47 fixes and minor improvements for Flink 1.5.5. The list below includes a detailed list of all fixes.
 We highly recommend all users to upgrade to Flink 1.5.6.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.5.6&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.5.6&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.5.6&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-10252] - Handle oversized metric messages [FLINK-10863] - Assign uids to all operators Bug [FLINK-8336] - YarnFileStageTestS3ITCase.testRecursiveUploadForYarnS3 test instability [FLINK-9646] - ExecutionGraphCoLocationRestartTest.testConstraintsAfterRestart failed on Travis [FLINK-10166] - Dependency problems when executing SQL query in sql-client [FLINK-10309] - Cancel with savepoint fails with java.net.ConnectException when using the per job-mode [FLINK-10419] - ClassNotFoundException while deserializing user exceptions from checkpointing [FLINK-10455] - Potential Kafka producer leak in case of failures [FLINK-10482] - java.lang.IllegalArgumentException: Negative number of in progress checkpoints [FLINK-10491] - Deadlock during spilling data in SpillableSubpartition [FLINK-10566] - Flink Planning is exponential in the number of stages [FLINK-10581] - YarnConfigurationITCase.testFlinkContainerMemory test instability [FLINK-10642] - CodeGen split fields errors when maxGeneratedCodeLength equals 1 [FLINK-10655] - RemoteRpcInvocation not overwriting ObjectInputStream&#39;s ClassNotFoundException [FLINK-10669] - Exceptions &amp; errors are not properly checked in logs in e2e tests [FLINK-10670] - Fix Correlate codegen error [FLINK-10674] - Fix handling of retractions after clean up [FLINK-10690] - Tests leak resources via Files.list [FLINK-10693] - Fix Scala EitherSerializer duplication [FLINK-10715] - E2e tests fail with ConcurrentModificationException in MetricRegistryImpl [FLINK-10750] - SocketClientSinkTest.testRetry fails on Travis [FLINK-10752] - Result of AbstractYarnClusterDescriptor#validateClusterResources is ignored [FLINK-10753] - Propagate and log snapshotting exceptions [FLINK-10770] - Some generated functions are not opened properly. 
[FLINK-10773] - Resume externalized checkpoint end-to-end test fails [FLINK-10821] - Resuming Externalized Checkpoint E2E test does not resume from Externalized Checkpoint [FLINK-10839] - Fix implementation of PojoSerializer.duplicate() w.r.t. subclass serializer [FLINK-10856] - Harden resume from externalized checkpoint E2E test [FLINK-10857] - Conflict between JMX and Prometheus Metrics reporter [FLINK-10880] - Failover strategies should not be applied to Batch Execution [FLINK-10913] - ExecutionGraphRestartTest.testRestartAutomatically unstable on Travis [FLINK-10925] - NPE in PythonPlanStreamer [FLINK-10990] - Enforce minimum timespan in MeterView [FLINK-10998] - flink-metrics-ganglia has LGPL dependency [FLINK-11011] - Elasticsearch 6 sink end-to-end test unstable Improvement [FLINK-4173] - Replace maven-assembly-plugin by maven-shade-plugin in flink-metrics [FLINK-9869] - Send PartitionInfo in batch to Improve perfornance [FLINK-10613] - Remove logger casts in HBaseConnectorITCase [FLINK-10614] - Update test_batch_allround.sh e2e to new testing infrastructure [FLINK-10637] - Start MiniCluster with random REST port [FLINK-10678] - Add a switch to run_test to configure if logs should be checked for errors/excepions [FLINK-10906] - docker-entrypoint.sh logs credentails during startup [FLINK-10916] - Include duplicated user-specified uid into error message [FLINK-11005] - Define flink-sql-client uber-jar dependencies via artifactSet Test [FLINK-10606] - Construct NetworkEnvironment simple for tests [FLINK-10607] - Unify to remove duplicated NoOpResultPartitionConsumableNotifier [FLINK-10827] - Add test for duplicate() to SerializerTestBase `}),e.add({id:221,href:"/2018/12/22/apache-flink-1.6.3-released/",title:"Apache Flink 1.6.3 Released",section:"Flink Blog",content:`The Apache Flink community released the third bugfix version of the Apache Flink 1.6 series.
+Sub-task [FLINK-10252] - Handle oversized metric messages [FLINK-10863] - Assign uids to all operators Bug [FLINK-8336] - YarnFileStageTestS3ITCase.testRecursiveUploadForYarnS3 test instability [FLINK-9646] - ExecutionGraphCoLocationRestartTest.testConstraintsAfterRestart failed on Travis [FLINK-10166] - Dependency problems when executing SQL query in sql-client [FLINK-10309] - Cancel with savepoint fails with java.net.ConnectException when using the per job-mode [FLINK-10419] - ClassNotFoundException while deserializing user exceptions from checkpointing [FLINK-10455] - Potential Kafka producer leak in case of failures [FLINK-10482] - java.lang.IllegalArgumentException: Negative number of in progress checkpoints [FLINK-10491] - Deadlock during spilling data in SpillableSubpartition [FLINK-10566] - Flink Planning is exponential in the number of stages [FLINK-10581] - YarnConfigurationITCase.testFlinkContainerMemory test instability [FLINK-10642] - CodeGen split fields errors when maxGeneratedCodeLength equals 1 [FLINK-10655] - RemoteRpcInvocation not overwriting ObjectInputStream&#39;s ClassNotFoundException [FLINK-10669] - Exceptions &amp; errors are not properly checked in logs in e2e tests [FLINK-10670] - Fix Correlate codegen error [FLINK-10674] - Fix handling of retractions after clean up [FLINK-10690] - Tests leak resources via Files.list [FLINK-10693] - Fix Scala EitherSerializer duplication [FLINK-10715] - E2e tests fail with ConcurrentModificationException in MetricRegistryImpl [FLINK-10750] - SocketClientSinkTest.testRetry fails on Travis [FLINK-10752] - Result of AbstractYarnClusterDescriptor#validateClusterResources is ignored [FLINK-10753] - Propagate and log snapshotting exceptions [FLINK-10770] - Some generated functions are not opened properly. 
[FLINK-10773] - Resume externalized checkpoint end-to-end test fails [FLINK-10821] - Resuming Externalized Checkpoint E2E test does not resume from Externalized Checkpoint [FLINK-10839] - Fix implementation of PojoSerializer.duplicate() w.r.t. subclass serializer [FLINK-10856] - Harden resume from externalized checkpoint E2E test [FLINK-10857] - Conflict between JMX and Prometheus Metrics reporter [FLINK-10880] - Failover strategies should not be applied to Batch Execution [FLINK-10913] - ExecutionGraphRestartTest.testRestartAutomatically unstable on Travis [FLINK-10925] - NPE in PythonPlanStreamer [FLINK-10990] - Enforce minimum timespan in MeterView [FLINK-10998] - flink-metrics-ganglia has LGPL dependency [FLINK-11011] - Elasticsearch 6 sink end-to-end test unstable Improvement [FLINK-4173] - Replace maven-assembly-plugin by maven-shade-plugin in flink-metrics [FLINK-9869] - Send PartitionInfo in batch to Improve perfornance [FLINK-10613] - Remove logger casts in HBaseConnectorITCase [FLINK-10614] - Update test_batch_allround.sh e2e to new testing infrastructure [FLINK-10637] - Start MiniCluster with random REST port [FLINK-10678] - Add a switch to run_test to configure if logs should be checked for errors/excepions [FLINK-10906] - docker-entrypoint.sh logs credentails during startup [FLINK-10916] - Include duplicated user-specified uid into error message [FLINK-11005] - Define flink-sql-client uber-jar dependencies via artifactSet Test [FLINK-10606] - Construct NetworkEnvironment simple for tests [FLINK-10607] - Unify to remove duplicated NoOpResultPartitionConsumableNotifier [FLINK-10827] - Add test for duplicate() to SerializerTestBase `}),e.add({id:222,href:"/2018/12/22/apache-flink-1.6.3-released/",title:"Apache Flink 1.6.3 Released",section:"Flink Blog",content:`The Apache Flink community released the third bugfix version of the Apache Flink 1.6 series.
 This release includes more than 80 fixes and minor improvements for Flink 1.6.2. The list below includes a detailed list of all fixes.
 We highly recommend all users to upgrade to Flink 1.6.3.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.6.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.6.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.6.3&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-10097] - More tests to increase StreamingFileSink test coverage [FLINK-10252] - Handle oversized metric messages [FLINK-10367] - Avoid recursion stack overflow during releasing SingleInputGate [FLINK-10863] - Assign uids to all operators in general purpose testing job Bug [FLINK-8336] - YarnFileStageTestS3ITCase.testRecursiveUploadForYarnS3 test instability [FLINK-9635] - Local recovery scheduling can cause spread out of tasks [FLINK-9646] - ExecutionGraphCoLocationRestartTest.testConstraintsAfterRestart failed on Travis [FLINK-9878] - IO worker threads BLOCKED on SSL Session Cache while CMS full gc [FLINK-10149] - Fink Mesos allocates extra port when not configured to do so. [FLINK-10166] - Dependency problems when executing SQL query in sql-client [FLINK-10309] - Cancel with savepoint fails with java.net.ConnectException when using the per job-mode [FLINK-10357] - Streaming File Sink end-to-end test failed with mismatch [FLINK-10359] - Scala example in DataSet docs is broken [FLINK-10364] - Test instability in NonHAQueryableStateFsBackendITCase#testMapState [FLINK-10419] - ClassNotFoundException while deserializing user exceptions from checkpointing [FLINK-10425] - taskmanager.host is not respected [FLINK-10455] - Potential Kafka producer leak in case of failures [FLINK-10463] - Null literal cannot be properly parsed in Java Table API function call [FLINK-10481] - Wordcount end-to-end test in docker env unstable [FLINK-10482] - java.lang.IllegalArgumentException: Negative number of in progress checkpoints [FLINK-10491] - Deadlock during spilling data in SpillableSubpartition [FLINK-10566] - Flink Planning is exponential in the number of stages [FLINK-10567] - Lost serialize fields when ttl state store with the mutable serializer [FLINK-10570] - State grows unbounded when &quot;within&quot; constraint not applied [FLINK-10581] - YarnConfigurationITCase.testFlinkContainerMemory test instability [FLINK-10642] - CodeGen split fields errors when 
maxGeneratedCodeLength equals 1 [FLINK-10655] - RemoteRpcInvocation not overwriting ObjectInputStream&#39;s ClassNotFoundException [FLINK-10663] - Closing StreamingFileSink can cause NPE [FLINK-10669] - Exceptions &amp; errors are not properly checked in logs in e2e tests [FLINK-10670] - Fix Correlate codegen error [FLINK-10674] - Fix handling of retractions after clean up [FLINK-10681] - elasticsearch6.ElasticsearchSinkITCase fails if wrong JNA library installed [FLINK-10690] - Tests leak resources via Files.list [FLINK-10693] - Fix Scala EitherSerializer duplication [FLINK-10715] - E2e tests fail with ConcurrentModificationException in MetricRegistryImpl [FLINK-10750] - SocketClientSinkTest.testRetry fails on Travis [FLINK-10752] - Result of AbstractYarnClusterDescriptor#validateClusterResources is ignored [FLINK-10753] - Propagate and log snapshotting exceptions [FLINK-10763] - Interval join produces wrong result type in Scala API [FLINK-10770] - Some generated functions are not opened properly. [FLINK-10773] - Resume externalized checkpoint end-to-end test fails [FLINK-10809] - Using DataStreamUtils.reinterpretAsKeyedStream produces corrupted keyed state after restore [FLINK-10816] - Fix LockableTypeSerializer.duplicate() [FLINK-10821] - Resuming Externalized Checkpoint E2E test does not resume from Externalized Checkpoint [FLINK-10839] - Fix implementation of PojoSerializer.duplicate() w.r.t. 
subclass serializer [FLINK-10842] - Waiting loops are broken in e2e/common.sh [FLINK-10856] - Harden resume from externalized checkpoint E2E test [FLINK-10857] - Conflict between JMX and Prometheus Metrics reporter [FLINK-10880] - Failover strategies should not be applied to Batch Execution [FLINK-10913] - ExecutionGraphRestartTest.testRestartAutomatically unstable on Travis [FLINK-10925] - NPE in PythonPlanStreamer [FLINK-10946] - Resuming Externalized Checkpoint (rocks, incremental, scale up) end-to-end test failed on Travis [FLINK-10990] - Enforce minimum timespan in MeterView [FLINK-10992] - Jepsen: Do not use /tmp as HDFS Data Directory [FLINK-10997] - Avro-confluent-registry does not bundle any dependency [FLINK-10998] - flink-metrics-ganglia has LGPL dependency [FLINK-11011] - Elasticsearch 6 sink end-to-end test unstable [FLINK-11017] - Time interval for window aggregations in SQL is wrongly translated if specified with YEAR_MONTH resolution [FLINK-11029] - Incorrect parameter in Working with state doc [FLINK-11041] - ReinterpretDataStreamAsKeyedStreamITCase.testReinterpretAsKeyedStream failed on Travis [FLINK-11045] - UserCodeClassLoader has not been set correctly for RuntimeUDFContext in CollectionExecutor [FLINK-11083] - CRowSerializerConfigSnapshot is not instantiable [FLINK-11087] - Broadcast state migration Incompatibility from 1.5.3 to 1.7.0 [FLINK-11123] - Missing import in ML quickstart docs [FLINK-11136] - Fix the logical of merge for DISTINCT aggregates Improvement [FLINK-4173] - Replace maven-assembly-plugin by maven-shade-plugin in flink-metrics [FLINK-10353] - Restoring a KafkaProducer with Semantic.EXACTLY_ONCE from a savepoint written with Semantic.AT_LEAST_ONCE fails with NPE [FLINK-10608] - Add avro files generated by datastream-allround-test to RAT exclusions [FLINK-10613] - Remove logger casts in HBaseConnectorITCase [FLINK-10614] - Update test_batch_allround.sh e2e to new testing infrastructure [FLINK-10637] - Start MiniCluster with 
random REST port [FLINK-10678] - Add a switch to run_test to configure if logs should be checked for errors/excepions [FLINK-10692] - Harden Confluent schema E2E test [FLINK-10883] - Submitting a jobs without enough slots times out due to a unspecified timeout [FLINK-10906] - docker-entrypoint.sh logs credentails during startup [FLINK-10916] - Include duplicated user-specified uid into error message [FLINK-10951] - Disable enforcing of YARN container virtual memory limits in tests [FLINK-11005] - Define flink-sql-client uber-jar dependencies via artifactSet Test [FLINK-10606] - Construct NetworkEnvironment simple for tests [FLINK-10607] - Unify to remove duplicated NoOpResultPartitionConsumableNotifier [FLINK-10827] - Add test for duplicate() to SerializerTestBase Wish [FLINK-10793] - Change visibility of TtlValue and TtlSerializer to public for external tools `}),e.add({id:222,href:"/2018/12/21/apache-flink-1.7.1-released/",title:"Apache Flink 1.7.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix version of the Apache Flink 1.7 series.
+Sub-task [FLINK-10097] - More tests to increase StreamingFileSink test coverage [FLINK-10252] - Handle oversized metric messages [FLINK-10367] - Avoid recursion stack overflow during releasing SingleInputGate [FLINK-10863] - Assign uids to all operators in general purpose testing job Bug [FLINK-8336] - YarnFileStageTestS3ITCase.testRecursiveUploadForYarnS3 test instability [FLINK-9635] - Local recovery scheduling can cause spread out of tasks [FLINK-9646] - ExecutionGraphCoLocationRestartTest.testConstraintsAfterRestart failed on Travis [FLINK-9878] - IO worker threads BLOCKED on SSL Session Cache while CMS full gc [FLINK-10149] - Fink Mesos allocates extra port when not configured to do so. [FLINK-10166] - Dependency problems when executing SQL query in sql-client [FLINK-10309] - Cancel with savepoint fails with java.net.ConnectException when using the per job-mode [FLINK-10357] - Streaming File Sink end-to-end test failed with mismatch [FLINK-10359] - Scala example in DataSet docs is broken [FLINK-10364] - Test instability in NonHAQueryableStateFsBackendITCase#testMapState [FLINK-10419] - ClassNotFoundException while deserializing user exceptions from checkpointing [FLINK-10425] - taskmanager.host is not respected [FLINK-10455] - Potential Kafka producer leak in case of failures [FLINK-10463] - Null literal cannot be properly parsed in Java Table API function call [FLINK-10481] - Wordcount end-to-end test in docker env unstable [FLINK-10482] - java.lang.IllegalArgumentException: Negative number of in progress checkpoints [FLINK-10491] - Deadlock during spilling data in SpillableSubpartition [FLINK-10566] - Flink Planning is exponential in the number of stages [FLINK-10567] - Lost serialize fields when ttl state store with the mutable serializer [FLINK-10570] - State grows unbounded when &quot;within&quot; constraint not applied [FLINK-10581] - YarnConfigurationITCase.testFlinkContainerMemory test instability [FLINK-10642] - CodeGen split fields errors when 
maxGeneratedCodeLength equals 1 [FLINK-10655] - RemoteRpcInvocation not overwriting ObjectInputStream&#39;s ClassNotFoundException [FLINK-10663] - Closing StreamingFileSink can cause NPE [FLINK-10669] - Exceptions &amp; errors are not properly checked in logs in e2e tests [FLINK-10670] - Fix Correlate codegen error [FLINK-10674] - Fix handling of retractions after clean up [FLINK-10681] - elasticsearch6.ElasticsearchSinkITCase fails if wrong JNA library installed [FLINK-10690] - Tests leak resources via Files.list [FLINK-10693] - Fix Scala EitherSerializer duplication [FLINK-10715] - E2e tests fail with ConcurrentModificationException in MetricRegistryImpl [FLINK-10750] - SocketClientSinkTest.testRetry fails on Travis [FLINK-10752] - Result of AbstractYarnClusterDescriptor#validateClusterResources is ignored [FLINK-10753] - Propagate and log snapshotting exceptions [FLINK-10763] - Interval join produces wrong result type in Scala API [FLINK-10770] - Some generated functions are not opened properly. [FLINK-10773] - Resume externalized checkpoint end-to-end test fails [FLINK-10809] - Using DataStreamUtils.reinterpretAsKeyedStream produces corrupted keyed state after restore [FLINK-10816] - Fix LockableTypeSerializer.duplicate() [FLINK-10821] - Resuming Externalized Checkpoint E2E test does not resume from Externalized Checkpoint [FLINK-10839] - Fix implementation of PojoSerializer.duplicate() w.r.t. 
subclass serializer [FLINK-10842] - Waiting loops are broken in e2e/common.sh [FLINK-10856] - Harden resume from externalized checkpoint E2E test [FLINK-10857] - Conflict between JMX and Prometheus Metrics reporter [FLINK-10880] - Failover strategies should not be applied to Batch Execution [FLINK-10913] - ExecutionGraphRestartTest.testRestartAutomatically unstable on Travis [FLINK-10925] - NPE in PythonPlanStreamer [FLINK-10946] - Resuming Externalized Checkpoint (rocks, incremental, scale up) end-to-end test failed on Travis [FLINK-10990] - Enforce minimum timespan in MeterView [FLINK-10992] - Jepsen: Do not use /tmp as HDFS Data Directory [FLINK-10997] - Avro-confluent-registry does not bundle any dependency [FLINK-10998] - flink-metrics-ganglia has LGPL dependency [FLINK-11011] - Elasticsearch 6 sink end-to-end test unstable [FLINK-11017] - Time interval for window aggregations in SQL is wrongly translated if specified with YEAR_MONTH resolution [FLINK-11029] - Incorrect parameter in Working with state doc [FLINK-11041] - ReinterpretDataStreamAsKeyedStreamITCase.testReinterpretAsKeyedStream failed on Travis [FLINK-11045] - UserCodeClassLoader has not been set correctly for RuntimeUDFContext in CollectionExecutor [FLINK-11083] - CRowSerializerConfigSnapshot is not instantiable [FLINK-11087] - Broadcast state migration Incompatibility from 1.5.3 to 1.7.0 [FLINK-11123] - Missing import in ML quickstart docs [FLINK-11136] - Fix the logical of merge for DISTINCT aggregates Improvement [FLINK-4173] - Replace maven-assembly-plugin by maven-shade-plugin in flink-metrics [FLINK-10353] - Restoring a KafkaProducer with Semantic.EXACTLY_ONCE from a savepoint written with Semantic.AT_LEAST_ONCE fails with NPE [FLINK-10608] - Add avro files generated by datastream-allround-test to RAT exclusions [FLINK-10613] - Remove logger casts in HBaseConnectorITCase [FLINK-10614] - Update test_batch_allround.sh e2e to new testing infrastructure [FLINK-10637] - Start MiniCluster with 
random REST port [FLINK-10678] - Add a switch to run_test to configure if logs should be checked for errors/excepions [FLINK-10692] - Harden Confluent schema E2E test [FLINK-10883] - Submitting a jobs without enough slots times out due to a unspecified timeout [FLINK-10906] - docker-entrypoint.sh logs credentails during startup [FLINK-10916] - Include duplicated user-specified uid into error message [FLINK-10951] - Disable enforcing of YARN container virtual memory limits in tests [FLINK-11005] - Define flink-sql-client uber-jar dependencies via artifactSet Test [FLINK-10606] - Construct NetworkEnvironment simple for tests [FLINK-10607] - Unify to remove duplicated NoOpResultPartitionConsumableNotifier [FLINK-10827] - Add test for duplicate() to SerializerTestBase Wish [FLINK-10793] - Change visibility of TtlValue and TtlSerializer to public for external tools `}),e.add({id:223,href:"/2018/12/21/apache-flink-1.7.1-released/",title:"Apache Flink 1.7.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix version of the Apache Flink 1.7 series.
 This release includes 27 fixes and minor improvements for Flink 1.7.0. The list below includes a detailed list of all fixes.
 We highly recommend all users to upgrade to Flink 1.7.1.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.7.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.7.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.7.1&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-10252] - Handle oversized metric messages [FLINK-10367] - Avoid recursion stack overflow during releasing SingleInputGate [FLINK-10522] - Check if RecoverableWriter supportsResume and act accordingly. [FLINK-10963] - Cleanup small objects uploaded to S3 as independent objects Bug [FLINK-8336] - YarnFileStageTestS3ITCase.testRecursiveUploadForYarnS3 test instability [FLINK-9646] - ExecutionGraphCoLocationRestartTest.testConstraintsAfterRestart failed on Travis [FLINK-10149] - Fink Mesos allocates extra port when not configured to do so. [FLINK-10359] - Scala example in DataSet docs is broken [FLINK-10482] - java.lang.IllegalArgumentException: Negative number of in progress checkpoints [FLINK-10566] - Flink Planning is exponential in the number of stages [FLINK-10997] - Avro-confluent-registry does not bundle any dependency [FLINK-11011] - Elasticsearch 6 sink end-to-end test unstable [FLINK-11013] - Fix distinct aggregates for group window in Table API [FLINK-11017] - Time interval for window aggregations in SQL is wrongly translated if specified with YEAR_MONTH resolution [FLINK-11029] - Incorrect parameter in Working with state doc [FLINK-11032] - Elasticsearch (v6.3.1) sink end-to-end test unstable on Travis [FLINK-11033] - Elasticsearch (v6.3.1) sink end-to-end test unstable on Travis [FLINK-11041] - ReinterpretDataStreamAsKeyedStreamITCase.testReinterpretAsKeyedStream failed on Travis [FLINK-11044] - RegisterTableSink docs incorrect [FLINK-11045] - UserCodeClassLoader has not been set correctly for RuntimeUDFContext in CollectionExecutor [FLINK-11047] - CoGroupGroupSortTranslationTest does not compile with scala 2.12 [FLINK-11085] - NoClassDefFoundError in presto-s3 filesystem [FLINK-11087] - Broadcast state migration Incompatibility from 1.5.3 to 1.7.0 [FLINK-11094] - Restored state in RocksDBStateBackend that has not been accessed in restored execution causes NPE on snapshot [FLINK-11123] - Missing import in ML quickstart docs [FLINK-11136] - 
Fix the logical of merge for DISTINCT aggregates Improvement [FLINK-11080] - Define flink-connector-elasticsearch6 uber-jar dependencies via artifactSet `}),e.add({id:223,href:"/2018/11/30/apache-flink-1.7.0-release-announcement/",title:"Apache Flink 1.7.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is pleased to announce Apache Flink 1.7.0. The latest release includes more than 420 resolved issues and some exciting additions to Flink that we describe in the following sections of this post. Please check the complete changelog for more details.
+Sub-task [FLINK-10252] - Handle oversized metric messages [FLINK-10367] - Avoid recursion stack overflow during releasing SingleInputGate [FLINK-10522] - Check if RecoverableWriter supportsResume and act accordingly. [FLINK-10963] - Cleanup small objects uploaded to S3 as independent objects Bug [FLINK-8336] - YarnFileStageTestS3ITCase.testRecursiveUploadForYarnS3 test instability [FLINK-9646] - ExecutionGraphCoLocationRestartTest.testConstraintsAfterRestart failed on Travis [FLINK-10149] - Fink Mesos allocates extra port when not configured to do so. [FLINK-10359] - Scala example in DataSet docs is broken [FLINK-10482] - java.lang.IllegalArgumentException: Negative number of in progress checkpoints [FLINK-10566] - Flink Planning is exponential in the number of stages [FLINK-10997] - Avro-confluent-registry does not bundle any dependency [FLINK-11011] - Elasticsearch 6 sink end-to-end test unstable [FLINK-11013] - Fix distinct aggregates for group window in Table API [FLINK-11017] - Time interval for window aggregations in SQL is wrongly translated if specified with YEAR_MONTH resolution [FLINK-11029] - Incorrect parameter in Working with state doc [FLINK-11032] - Elasticsearch (v6.3.1) sink end-to-end test unstable on Travis [FLINK-11033] - Elasticsearch (v6.3.1) sink end-to-end test unstable on Travis [FLINK-11041] - ReinterpretDataStreamAsKeyedStreamITCase.testReinterpretAsKeyedStream failed on Travis [FLINK-11044] - RegisterTableSink docs incorrect [FLINK-11045] - UserCodeClassLoader has not been set correctly for RuntimeUDFContext in CollectionExecutor [FLINK-11047] - CoGroupGroupSortTranslationTest does not compile with scala 2.12 [FLINK-11085] - NoClassDefFoundError in presto-s3 filesystem [FLINK-11087] - Broadcast state migration Incompatibility from 1.5.3 to 1.7.0 [FLINK-11094] - Restored state in RocksDBStateBackend that has not been accessed in restored execution causes NPE on snapshot [FLINK-11123] - Missing import in ML quickstart docs [FLINK-11136] - 
Fix the logical of merge for DISTINCT aggregates Improvement [FLINK-11080] - Define flink-connector-elasticsearch6 uber-jar dependencies via artifactSet `}),e.add({id:224,href:"/2018/11/30/apache-flink-1.7.0-release-announcement/",title:"Apache Flink 1.7.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is pleased to announce Apache Flink 1.7.0. The latest release includes more than 420 resolved issues and some exciting additions to Flink that we describe in the following sections of this post. Please check the complete changelog for more details.
 Flink 1.7.0 is API-compatible with previous 1.x.y releases for APIs annotated with the @Public annotation. The release is available now and we encourage everyone to download the release and check out the updated documentation. Feedback through the Flink mailing lists or JIRA is, as always, very much appreciated!
 You can find the binaries on the updated Downloads page on the Flink project site.
 Flink 1.7.0 - Extending the reach of Stream Processing # In Flink 1.7.0 we come closer to our goals of enabling fast data processing and building data-intensive applications for the Flink community in a seamless way. Our latest release includes some exciting new features and improvements such as support for Scala 2.12, an exactly-once S3 file sink, the integration of complex event processing with streaming SQL and more features that we explain below.
@@ -4457,37 +4467,37 @@
 Release Notes # Please review the release notes if you plan to upgrade your Flink setup to Flink 1.7.
 List of Contributors # We would like to acknowledge all community members for contributing to this release. Special credits go to the following members for contributing to the 1.7.0 release (according to git):
 Aitozi, Alex Arkhipov, Alexander Koltsov, Alexey Trenikhin, Alice, Alice Yan, Aljoscha Krettek, Andrei Poluliakh, Andrey Zagrebin, Ashwin Sinha, Barisa Obradovic, Ben La Monica, Benoit Meriaux, Bowen Li, Chesnay Schepler, Christophe Jolif, Congxian Qiu, Craig Foster, David Anderson, Dawid Wysakowicz, Dian Fu, Diego Carvallo, Dimitris Palyvos, Eugen Yushin, Fabian Hueske, Florian Schmidt, Gary Yao, Guibo Pan, Hequn Cheng, Hiroaki Yoshida, Igal Shilman, JIN SUN, Jamie Grier, Jayant Ameta, Jeff Zhang, Jeffrey Chung, Jicaar, Jin Sun, Joe Malt, Johannes Dillmann, Jun Zhang, Kostas Kloudas, Krzysztof Białek, Lakshmi Gururaja Rao, Liu Biao, Mahesh Senniappan, Manuel Hoffmann, Mark Cho, Max Feng, Mike Pedersen, Mododo, Nico Kruber, Oleksandr Nitavskyi, Osman Şamil AKÇELİK, Patrick Lucas, Paul Lam, Piotr Nowojski, Rick Hofstede, Rong R, Rong Rong, Sayat Satybaldiyev, Sebastian Klemke, Seth Wiesman, Shimin Yang, Shuyi Chen, Stefan Richter, Stephan Ewen, Stephen Jason, Thomas Weise, Till Rohrmann, Timo Walther, Tzu-Li &ldquo;tison&rdquo; Chen, Tzu-Li (Gordon) Tai, Tzu-Li Chen, Wosin, Xingcan Cui, Xpray, Xue Yu, Yangze Guo, Ying Xu, Yun Tang, Zhijiang, blues Zheng, hequn8128, ifndef-SleePy, jerryjzhang, jrthe42, jyc.jia, kkolman, lihongli, linjun, linzhaoming, liurenjie1024, liuxianjiao, lrl, lsy, lzqdename, maqingxiang, maqingxiang-it, minwenjun, shuai-xu, sihuazhou, snuyanzin, wind, xuewei.linxuewei, xueyu, xuqianjin, yanghua, yangshimin, zhijiang, 谢磊, 陈梓立
-`}),e.add({id:224,href:"/2018/10/29/apache-flink-1.5.5-released/",title:"Apache Flink 1.5.5 Released",section:"Flink Blog",content:`The Apache Flink community released the fifth bugfix version of the Apache Flink 1.5 series.
+`}),e.add({id:225,href:"/2018/10/29/apache-flink-1.5.5-released/",title:"Apache Flink 1.5.5 Released",section:"Flink Blog",content:`The Apache Flink community released the fifth bugfix version of the Apache Flink 1.5 series.
 This release includes more than 20 fixes and minor improvements for Flink 1.5.4. The list below includes a detailed list of all fixes.
 We highly recommend all users to upgrade to Flink 1.5.5.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.5.5&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.5.5&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.5.5&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-10242] - Latency marker interval should be configurable [FLINK-10243] - Add option to reduce latency metrics granularity [FLINK-10331] - Reduce number of flush requests to the network stack [FLINK-10332] - Move data available notification in PipelinedSubpartition out of the synchronized block Bug [FLINK-5542] - YARN client incorrectly uses local YARN config to check vcore capacity [FLINK-9567] - Flink does not release resource in Yarn Cluster mode [FLINK-9788] - ExecutionGraph Inconsistency prevents Job from recovering [FLINK-9884] - Slot request may not be removed when it has already be assigned in slot manager [FLINK-9891] - Flink cluster is not shutdown in YARN mode when Flink client is stopped [FLINK-9932] - Timed-out TaskExecutor slot-offers to JobMaster leak the slot [FLINK-10135] - Certain cluster-level metrics are no longer exposed [FLINK-10222] - Table scalar function expression parses error when function name equals the exists keyword suffix [FLINK-10259] - Key validation for GroupWindowAggregate is broken [FLINK-10316] - Add check to KinesisProducer that aws.region is set [FLINK-10354] - Savepoints should be counted as retained checkpoints [FLINK-10400] - Return failed JobResult if job terminates in state FAILED or CANCELED [FLINK-10415] - RestClient does not react to lost connection [FLINK-10451] - TableFunctionCollector should handle the life cycle of ScalarFunction [FLINK-10469] - FileChannel may not write the whole buffer in a single call to FileChannel.write(Buffer buffer) [FLINK-10487] - fix invalid Flink SQL example [FLINK-10516] - YarnApplicationMasterRunner does not initialize FileSystem with correct Flink Configuration during setup [FLINK-10524] - MemoryManagerConcurrentModReleaseTest.testConcurrentModificationWhileReleasing failed on travis [FLINK-10544] - Remove custom settings.xml for snapshot deployments Improvement [FLINK-10075] - HTTP connections to a secured REST endpoint flood the log [FLINK-10260] - Confusing log 
messages during TaskManager registration [FLINK-10282] - Provide separate thread-pool for REST endpoint [FLINK-10312] - Wrong / missing exception when submitting job [FLINK-10375] - ExceptionInChainedStubException hides wrapped exception in cause [FLINK-10582] - Make REST executor thread priority configurable `}),e.add({id:225,href:"/2018/10/29/apache-flink-1.6.2-released/",title:"Apache Flink 1.6.2 Released",section:"Flink Blog",content:`The Apache Flink community released the second bugfix version of the Apache Flink 1.6 series.
+Sub-task [FLINK-10242] - Latency marker interval should be configurable [FLINK-10243] - Add option to reduce latency metrics granularity [FLINK-10331] - Reduce number of flush requests to the network stack [FLINK-10332] - Move data available notification in PipelinedSubpartition out of the synchronized block Bug [FLINK-5542] - YARN client incorrectly uses local YARN config to check vcore capacity [FLINK-9567] - Flink does not release resource in Yarn Cluster mode [FLINK-9788] - ExecutionGraph Inconsistency prevents Job from recovering [FLINK-9884] - Slot request may not be removed when it has already be assigned in slot manager [FLINK-9891] - Flink cluster is not shutdown in YARN mode when Flink client is stopped [FLINK-9932] - Timed-out TaskExecutor slot-offers to JobMaster leak the slot [FLINK-10135] - Certain cluster-level metrics are no longer exposed [FLINK-10222] - Table scalar function expression parses error when function name equals the exists keyword suffix [FLINK-10259] - Key validation for GroupWindowAggregate is broken [FLINK-10316] - Add check to KinesisProducer that aws.region is set [FLINK-10354] - Savepoints should be counted as retained checkpoints [FLINK-10400] - Return failed JobResult if job terminates in state FAILED or CANCELED [FLINK-10415] - RestClient does not react to lost connection [FLINK-10451] - TableFunctionCollector should handle the life cycle of ScalarFunction [FLINK-10469] - FileChannel may not write the whole buffer in a single call to FileChannel.write(Buffer buffer) [FLINK-10487] - fix invalid Flink SQL example [FLINK-10516] - YarnApplicationMasterRunner does not initialize FileSystem with correct Flink Configuration during setup [FLINK-10524] - MemoryManagerConcurrentModReleaseTest.testConcurrentModificationWhileReleasing failed on travis [FLINK-10544] - Remove custom settings.xml for snapshot deployments Improvement [FLINK-10075] - HTTP connections to a secured REST endpoint flood the log [FLINK-10260] - Confusing log 
messages during TaskManager registration [FLINK-10282] - Provide separate thread-pool for REST endpoint [FLINK-10312] - Wrong / missing exception when submitting job [FLINK-10375] - ExceptionInChainedStubException hides wrapped exception in cause [FLINK-10582] - Make REST executor thread priority configurable `}),e.add({id:226,href:"/2018/10/29/apache-flink-1.6.2-released/",title:"Apache Flink 1.6.2 Released",section:"Flink Blog",content:`The Apache Flink community released the second bugfix version of the Apache Flink 1.6 series.
 This release includes more than 30 fixes and minor improvements for Flink 1.6.1. The list below includes a detailed list of all fixes.
 We highly recommend all users to upgrade to Flink 1.6.2.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.6.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.6.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.6.2&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-10242] - Latency marker interval should be configurable [FLINK-10243] - Add option to reduce latency metrics granularity [FLINK-10331] - Reduce number of flush requests to the network stack [FLINK-10332] - Move data available notification in PipelinedSubpartition out of the synchronized block Bug [FLINK-5542] - YARN client incorrectly uses local YARN config to check vcore capacity [FLINK-9567] - Flink does not release resource in Yarn Cluster mode [FLINK-9788] - ExecutionGraph Inconsistency prevents Job from recovering [FLINK-9884] - Slot request may not be removed when it has already be assigned in slot manager [FLINK-9891] - Flink cluster is not shutdown in YARN mode when Flink client is stopped [FLINK-9932] - Timed-out TaskExecutor slot-offers to JobMaster leak the slot [FLINK-10135] - Certain cluster-level metrics are no longer exposed [FLINK-10157] - Allow \`null\` user values in map state with TTL [FLINK-10222] - Table scalar function expression parses error when function name equals the exists keyword suffix [FLINK-10259] - Key validation for GroupWindowAggregate is broken [FLINK-10263] - User-defined function with LITERAL paramters yields CompileException [FLINK-10316] - Add check to KinesisProducer that aws.region is set [FLINK-10354] - Savepoints should be counted as retained checkpoints [FLINK-10363] - S3 FileSystem factory prints secrets into logs [FLINK-10379] - Can not use Table Functions in Java Table API [FLINK-10383] - Hadoop configurations on the classpath seep into the S3 file system configs [FLINK-10390] - DataDog MetricReporter leaks connections [FLINK-10400] - Return failed JobResult if job terminates in state FAILED or CANCELED [FLINK-10415] - RestClient does not react to lost connection [FLINK-10444] - Make S3 entropy injection work with FileSystem safety net [FLINK-10451] - TableFunctionCollector should handle the life cycle of ScalarFunction [FLINK-10465] - Jepsen: runit supervised sshd is stopped on tear down 
[FLINK-10469] - FileChannel may not write the whole buffer in a single call to FileChannel.write(Buffer buffer) [FLINK-10487] - fix invalid Flink SQL example [FLINK-10516] - YarnApplicationMasterRunner does not initialize FileSystem with correct Flink Configuration during setup [FLINK-10524] - MemoryManagerConcurrentModReleaseTest.testConcurrentModificationWhileReleasing failed on travis [FLINK-10532] - Broken links in documentation [FLINK-10544] - Remove custom settings.xml for snapshot deployments Improvement [FLINK-9061] - Add entropy to s3 path for better scalability [FLINK-10075] - HTTP connections to a secured REST endpoint flood the log [FLINK-10260] - Confusing log messages during TaskManager registration [FLINK-10282] - Provide separate thread-pool for REST endpoint [FLINK-10291] - Generate JobGraph with fixed/configurable JobID in StandaloneJobClusterEntrypoint [FLINK-10311] - HA end-to-end/Jepsen tests for standby Dispatchers [FLINK-10312] - Wrong / missing exception when submitting job [FLINK-10371] - Allow to enable SSL mutual authentication on REST endpoints by configuration [FLINK-10375] - ExceptionInChainedStubException hides wrapped exception in cause [FLINK-10582] - Make REST executor thread priority configurable `}),e.add({id:226,href:"/2018/09/20/apache-flink-1.5.4-released/",title:"Apache Flink 1.5.4 Released",section:"Flink Blog",content:`The Apache Flink community released the fourth bugfix version of the Apache Flink 1.5 series.
+Sub-task [FLINK-10242] - Latency marker interval should be configurable [FLINK-10243] - Add option to reduce latency metrics granularity [FLINK-10331] - Reduce number of flush requests to the network stack [FLINK-10332] - Move data available notification in PipelinedSubpartition out of the synchronized block Bug [FLINK-5542] - YARN client incorrectly uses local YARN config to check vcore capacity [FLINK-9567] - Flink does not release resource in Yarn Cluster mode [FLINK-9788] - ExecutionGraph Inconsistency prevents Job from recovering [FLINK-9884] - Slot request may not be removed when it has already be assigned in slot manager [FLINK-9891] - Flink cluster is not shutdown in YARN mode when Flink client is stopped [FLINK-9932] - Timed-out TaskExecutor slot-offers to JobMaster leak the slot [FLINK-10135] - Certain cluster-level metrics are no longer exposed [FLINK-10157] - Allow \`null\` user values in map state with TTL [FLINK-10222] - Table scalar function expression parses error when function name equals the exists keyword suffix [FLINK-10259] - Key validation for GroupWindowAggregate is broken [FLINK-10263] - User-defined function with LITERAL paramters yields CompileException [FLINK-10316] - Add check to KinesisProducer that aws.region is set [FLINK-10354] - Savepoints should be counted as retained checkpoints [FLINK-10363] - S3 FileSystem factory prints secrets into logs [FLINK-10379] - Can not use Table Functions in Java Table API [FLINK-10383] - Hadoop configurations on the classpath seep into the S3 file system configs [FLINK-10390] - DataDog MetricReporter leaks connections [FLINK-10400] - Return failed JobResult if job terminates in state FAILED or CANCELED [FLINK-10415] - RestClient does not react to lost connection [FLINK-10444] - Make S3 entropy injection work with FileSystem safety net [FLINK-10451] - TableFunctionCollector should handle the life cycle of ScalarFunction [FLINK-10465] - Jepsen: runit supervised sshd is stopped on tear down 
[FLINK-10469] - FileChannel may not write the whole buffer in a single call to FileChannel.write(Buffer buffer) [FLINK-10487] - fix invalid Flink SQL example [FLINK-10516] - YarnApplicationMasterRunner does not initialize FileSystem with correct Flink Configuration during setup [FLINK-10524] - MemoryManagerConcurrentModReleaseTest.testConcurrentModificationWhileReleasing failed on travis [FLINK-10532] - Broken links in documentation [FLINK-10544] - Remove custom settings.xml for snapshot deployments Improvement [FLINK-9061] - Add entropy to s3 path for better scalability [FLINK-10075] - HTTP connections to a secured REST endpoint flood the log [FLINK-10260] - Confusing log messages during TaskManager registration [FLINK-10282] - Provide separate thread-pool for REST endpoint [FLINK-10291] - Generate JobGraph with fixed/configurable JobID in StandaloneJobClusterEntrypoint [FLINK-10311] - HA end-to-end/Jepsen tests for standby Dispatchers [FLINK-10312] - Wrong / missing exception when submitting job [FLINK-10371] - Allow to enable SSL mutual authentication on REST endpoints by configuration [FLINK-10375] - ExceptionInChainedStubException hides wrapped exception in cause [FLINK-10582] - Make REST executor thread priority configurable `}),e.add({id:227,href:"/2018/09/20/apache-flink-1.5.4-released/",title:"Apache Flink 1.5.4 Released",section:"Flink Blog",content:`The Apache Flink community released the fourth bugfix version of the Apache Flink 1.5 series.
 This release includes more than 20 fixes and minor improvements for Flink 1.5.4. The list below includes a detailed list of all fixes.
 We highly recommend all users to upgrade to Flink 1.5.4.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.5.4&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.5.4&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.5.4&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Bug [FLINK-9878] - IO worker threads BLOCKED on SSL Session Cache while CMS full gc [FLINK-10011] - Old job resurrected during HA failover [FLINK-10101] - Mesos web ui url is missing. [FLINK-10115] - Content-length limit is also applied to FileUploads [FLINK-10116] - createComparator fails on case class with Unit type fields prior to the join-key [FLINK-10141] - Reduce lock contention introduced with 1.5 [FLINK-10142] - Reduce synchronization overhead for credit notifications [FLINK-10150] - Chained batch operators interfere with each other other [FLINK-10172] - Inconsistentcy in ExpressionParser and ExpressionDsl for order by asc/desc [FLINK-10193] - Default RPC timeout is used when triggering savepoint via JobMasterGateway [FLINK-10204] - StreamElementSerializer#copy broken for LatencyMarkers [FLINK-10255] - Standby Dispatcher locks submitted JobGraphs [FLINK-10261] - INSERT INTO does not work with ORDER BY clause [FLINK-10267] - [State] Fix arbitrary iterator access on RocksDBMapIterator [FLINK-10293] - RemoteStreamEnvironment does not forward port to RestClusterClient [FLINK-10314] - Blocking calls in Execution Graph creation bring down cluster [FLINK-10328] - Stopping the ZooKeeperSubmittedJobGraphStore should release all currently held locks [FLINK-10329] - Fail with exception if job cannot be removed by ZooKeeperSubmittedJobGraphStore#removeJobGraph Improvement [FLINK-10082] - Initialize StringBuilder in Slf4jReporter with estimated size [FLINK-10131] - Improve logging around ResultSubpartition [FLINK-10137] - YARN: Log completed Containers [FLINK-10185] - Make ZooKeeperStateHandleStore#releaseAndTryRemove synchronous [FLINK-10223] - TaskManagers should log their ResourceID during startup [FLINK-10301] - Allow a custom Configuration in StreamNetworkBenchmarkEnvironment `}),e.add({id:227,href:"/2018/09/20/apache-flink-1.6.1-released/",title:"Apache Flink 1.6.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix 
version of the Apache Flink 1.6 series.
+Bug [FLINK-9878] - IO worker threads BLOCKED on SSL Session Cache while CMS full gc [FLINK-10011] - Old job resurrected during HA failover [FLINK-10101] - Mesos web ui url is missing. [FLINK-10115] - Content-length limit is also applied to FileUploads [FLINK-10116] - createComparator fails on case class with Unit type fields prior to the join-key [FLINK-10141] - Reduce lock contention introduced with 1.5 [FLINK-10142] - Reduce synchronization overhead for credit notifications [FLINK-10150] - Chained batch operators interfere with each other other [FLINK-10172] - Inconsistentcy in ExpressionParser and ExpressionDsl for order by asc/desc [FLINK-10193] - Default RPC timeout is used when triggering savepoint via JobMasterGateway [FLINK-10204] - StreamElementSerializer#copy broken for LatencyMarkers [FLINK-10255] - Standby Dispatcher locks submitted JobGraphs [FLINK-10261] - INSERT INTO does not work with ORDER BY clause [FLINK-10267] - [State] Fix arbitrary iterator access on RocksDBMapIterator [FLINK-10293] - RemoteStreamEnvironment does not forward port to RestClusterClient [FLINK-10314] - Blocking calls in Execution Graph creation bring down cluster [FLINK-10328] - Stopping the ZooKeeperSubmittedJobGraphStore should release all currently held locks [FLINK-10329] - Fail with exception if job cannot be removed by ZooKeeperSubmittedJobGraphStore#removeJobGraph Improvement [FLINK-10082] - Initialize StringBuilder in Slf4jReporter with estimated size [FLINK-10131] - Improve logging around ResultSubpartition [FLINK-10137] - YARN: Log completed Containers [FLINK-10185] - Make ZooKeeperStateHandleStore#releaseAndTryRemove synchronous [FLINK-10223] - TaskManagers should log their ResourceID during startup [FLINK-10301] - Allow a custom Configuration in StreamNetworkBenchmarkEnvironment `}),e.add({id:228,href:"/2018/09/20/apache-flink-1.6.1-released/",title:"Apache Flink 1.6.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix 
version of the Apache Flink 1.6 series.
 This release includes 60 fixes and minor improvements for Flink 1.6.1. The list below includes a detailed list of all fixes.
 We highly recommend all users to upgrade to Flink 1.6.1.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.6.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.6.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.6.1&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-9637] - Add public user documentation for TTL feature [FLINK-10068] - Add documentation for async/RocksDB-based timers [FLINK-10085] - Update AbstractOperatorRestoreTestBase [FLINK-10087] - Update BucketingSinkMigrationTest [FLINK-10089] - Update FlinkKafkaConsumerBaseMigrationTest [FLINK-10090] - Update ContinuousFileProcessingMigrationTest [FLINK-10091] - Update WindowOperatorMigrationTest [FLINK-10092] - Update StatefulJobSavepointMigrationITCase [FLINK-10109] - Add documentation for StreamingFileSink Bug [FLINK-9289] - Parallelism of generated operators should have max parallism of input [FLINK-9546] - The heartbeatTimeoutIntervalMs of HeartbeatMonitor should be larger than 0 [FLINK-9693] - Possible memory leak in jobmanager retaining archived checkpoints [FLINK-9972] - Debug memory logging not working [FLINK-10011] - Old job resurrected during HA failover [FLINK-10063] - Jepsen: Automatically restart Mesos Processes [FLINK-10101] - Mesos web ui url is missing. [FLINK-10105] - Test failure because of jobmanager.execution.failover-strategy is outdated [FLINK-10115] - Content-length limit is also applied to FileUploads [FLINK-10116] - createComparator fails on case class with Unit type fields prior to the join-key [FLINK-10141] - Reduce lock contention introduced with 1.5 [FLINK-10142] - Reduce synchronization overhead for credit notifications [FLINK-10150] - Chained batch operators interfere with each other other [FLINK-10151] - [State TTL] Fix false recursion call in TransformingStateTableKeyGroupPartitioner.tryAddToSource [FLINK-10154] - Make sure we always read at least one record in KinesisConnector [FLINK-10169] - RowtimeValidator fails with custom TimestampExtractor [FLINK-10172] - Inconsistentcy in ExpressionParser and ExpressionDsl for order by asc/desc [FLINK-10192] - SQL Client table visualization mode does not update correctly [FLINK-10193] - Default RPC timeout is used when triggering savepoint via JobMasterGateway [FLINK-10204] - 
StreamElementSerializer#copy broken for LatencyMarkers [FLINK-10255] - Standby Dispatcher locks submitted JobGraphs [FLINK-10261] - INSERT INTO does not work with ORDER BY clause [FLINK-10267] - [State] Fix arbitrary iterator access on RocksDBMapIterator [FLINK-10269] - Elasticsearch 6 UpdateRequest fail because of binary incompatibility [FLINK-10283] - FileCache logs unnecessary warnings [FLINK-10293] - RemoteStreamEnvironment does not forward port to RestClusterClient [FLINK-10314] - Blocking calls in Execution Graph creation bring down cluster [FLINK-10328] - Stopping the ZooKeeperSubmittedJobGraphStore should release all currently held locks [FLINK-10329] - Fail with exception if job cannot be removed by ZooKeeperSubmittedJobGraphStore#removeJobGraph New Feature [FLINK-10022] - Add metrics for input/output buffers Improvement [FLINK-9013] - Document yarn.containers.vcores only being effective when adapting YARN config [FLINK-9446] - Compatibility table not up-to-date [FLINK-9795] - Update Mesos documentation for flip6 [FLINK-9859] - More Akka config options [FLINK-9899] - Add more metrics to the Kinesis source connector [FLINK-9962] - allow users to specify TimeZone in DateTimeBucketer [FLINK-10001] - Improve Kubernetes documentation [FLINK-10006] - Improve logging in BarrierBuffer [FLINK-10020] - Kinesis Consumer listShards should support more recoverable exceptions [FLINK-10082] - Initialize StringBuilder in Slf4jReporter with estimated size [FLINK-10094] - Always backup default config for end-to-end tests [FLINK-10110] - Harden e2e Kafka shutdown [FLINK-10131] - Improve logging around ResultSubpartition [FLINK-10137] - YARN: Log completed Containers [FLINK-10164] - Add support for resuming from savepoints to StandaloneJobClusterEntrypoint [FLINK-10170] - Support string representation for map and array types in descriptor-based Table API [FLINK-10185] - Make ZooKeeperStateHandleStore#releaseAndTryRemove synchronous [FLINK-10223] - TaskManagers should log 
their ResourceID during startup [FLINK-10301] - Allow a custom Configuration in StreamNetworkBenchmarkEnvironment [FLINK-10325] - [State TTL] Refactor TtlListState to use only loops, no java stream API for performance Test [FLINK-10084] - Migration tests weren&#39;t updated for 1.5 `}),e.add({id:228,href:"/2018/08/21/apache-flink-1.5.3-released/",title:"Apache Flink 1.5.3 Released",section:"Flink Blog",content:`The Apache Flink community released the third bugfix version of the Apache Flink 1.5 series.
+Sub-task [FLINK-9637] - Add public user documentation for TTL feature [FLINK-10068] - Add documentation for async/RocksDB-based timers [FLINK-10085] - Update AbstractOperatorRestoreTestBase [FLINK-10087] - Update BucketingSinkMigrationTest [FLINK-10089] - Update FlinkKafkaConsumerBaseMigrationTest [FLINK-10090] - Update ContinuousFileProcessingMigrationTest [FLINK-10091] - Update WindowOperatorMigrationTest [FLINK-10092] - Update StatefulJobSavepointMigrationITCase [FLINK-10109] - Add documentation for StreamingFileSink Bug [FLINK-9289] - Parallelism of generated operators should have max parallism of input [FLINK-9546] - The heartbeatTimeoutIntervalMs of HeartbeatMonitor should be larger than 0 [FLINK-9693] - Possible memory leak in jobmanager retaining archived checkpoints [FLINK-9972] - Debug memory logging not working [FLINK-10011] - Old job resurrected during HA failover [FLINK-10063] - Jepsen: Automatically restart Mesos Processes [FLINK-10101] - Mesos web ui url is missing. [FLINK-10105] - Test failure because of jobmanager.execution.failover-strategy is outdated [FLINK-10115] - Content-length limit is also applied to FileUploads [FLINK-10116] - createComparator fails on case class with Unit type fields prior to the join-key [FLINK-10141] - Reduce lock contention introduced with 1.5 [FLINK-10142] - Reduce synchronization overhead for credit notifications [FLINK-10150] - Chained batch operators interfere with each other other [FLINK-10151] - [State TTL] Fix false recursion call in TransformingStateTableKeyGroupPartitioner.tryAddToSource [FLINK-10154] - Make sure we always read at least one record in KinesisConnector [FLINK-10169] - RowtimeValidator fails with custom TimestampExtractor [FLINK-10172] - Inconsistentcy in ExpressionParser and ExpressionDsl for order by asc/desc [FLINK-10192] - SQL Client table visualization mode does not update correctly [FLINK-10193] - Default RPC timeout is used when triggering savepoint via JobMasterGateway [FLINK-10204] - 
StreamElementSerializer#copy broken for LatencyMarkers [FLINK-10255] - Standby Dispatcher locks submitted JobGraphs [FLINK-10261] - INSERT INTO does not work with ORDER BY clause [FLINK-10267] - [State] Fix arbitrary iterator access on RocksDBMapIterator [FLINK-10269] - Elasticsearch 6 UpdateRequest fail because of binary incompatibility [FLINK-10283] - FileCache logs unnecessary warnings [FLINK-10293] - RemoteStreamEnvironment does not forward port to RestClusterClient [FLINK-10314] - Blocking calls in Execution Graph creation bring down cluster [FLINK-10328] - Stopping the ZooKeeperSubmittedJobGraphStore should release all currently held locks [FLINK-10329] - Fail with exception if job cannot be removed by ZooKeeperSubmittedJobGraphStore#removeJobGraph New Feature [FLINK-10022] - Add metrics for input/output buffers Improvement [FLINK-9013] - Document yarn.containers.vcores only being effective when adapting YARN config [FLINK-9446] - Compatibility table not up-to-date [FLINK-9795] - Update Mesos documentation for flip6 [FLINK-9859] - More Akka config options [FLINK-9899] - Add more metrics to the Kinesis source connector [FLINK-9962] - allow users to specify TimeZone in DateTimeBucketer [FLINK-10001] - Improve Kubernetes documentation [FLINK-10006] - Improve logging in BarrierBuffer [FLINK-10020] - Kinesis Consumer listShards should support more recoverable exceptions [FLINK-10082] - Initialize StringBuilder in Slf4jReporter with estimated size [FLINK-10094] - Always backup default config for end-to-end tests [FLINK-10110] - Harden e2e Kafka shutdown [FLINK-10131] - Improve logging around ResultSubpartition [FLINK-10137] - YARN: Log completed Containers [FLINK-10164] - Add support for resuming from savepoints to StandaloneJobClusterEntrypoint [FLINK-10170] - Support string representation for map and array types in descriptor-based Table API [FLINK-10185] - Make ZooKeeperStateHandleStore#releaseAndTryRemove synchronous [FLINK-10223] - TaskManagers should log 
their ResourceID during startup [FLINK-10301] - Allow a custom Configuration in StreamNetworkBenchmarkEnvironment [FLINK-10325] - [State TTL] Refactor TtlListState to use only loops, no java stream API for performance Test [FLINK-10084] - Migration tests weren&#39;t updated for 1.5 `}),e.add({id:229,href:"/2018/08/21/apache-flink-1.5.3-released/",title:"Apache Flink 1.5.3 Released",section:"Flink Blog",content:`The Apache Flink community released the third bugfix version of the Apache Flink 1.5 series.
 This release includes more than 20 fixes and minor improvements for Flink 1.5.3. The list below includes a detailed list of all fixes.
 We highly recommend all users to upgrade to Flink 1.5.3.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.5.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.5.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.5.3&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-9951] - Update scm developerConnection Bug [FLINK-5750] - Incorrect translation of n-ary Union [FLINK-9289] - Parallelism of generated operators should have max parallism of input [FLINK-9546] - The heartbeatTimeoutIntervalMs of HeartbeatMonitor should be larger than 0 [FLINK-9655] - Externalized checkpoint E2E test fails on travis [FLINK-9693] - Possible memory leak in jobmanager retaining archived checkpoints [FLINK-9694] - Potentially NPE in CompositeTypeSerializerConfigSnapshot constructor [FLINK-9923] - OneInputStreamTaskTest.testWatermarkMetrics fails on Travis [FLINK-9935] - Batch Table API: grouping by window and attribute causes java.lang.ClassCastException: [FLINK-9936] - Mesos resource manager unable to connect to master after failover [FLINK-9946] - Quickstart E2E test archetype version is hard-coded [FLINK-9969] - Unreasonable memory requirements to complete examples/batch/WordCount [FLINK-9972] - Debug memory logging not working [FLINK-9978] - Source release sha contains absolute file path [FLINK-9985] - Incorrect parameter order in document [FLINK-9988] - job manager does not respect property jobmanager.web.address [FLINK-10013] - Fix Kerberos integration for FLIP-6 YarnTaskExecutorRunner [FLINK-10033] - Let Task release reference to Invokable on shutdown [FLINK-10070] - Flink cannot be compiled with maven 3.0.x New Feature [FLINK-10022] - Add metrics for input/output buffers Improvement [FLINK-9446] - Compatibility table not up-to-date [FLINK-9765] - Improve CLI responsiveness when cluster is not reachable [FLINK-9806] - Add a canonical link element to documentation HTML [FLINK-9859] - More Akka config options [FLINK-9942] - Guard handlers against null fields in requests [FLINK-9986] - Remove unnecessary information from .version.properties file [FLINK-9987] - Rework ClassLoader E2E test to not rely on .version.properties file [FLINK-10006] - Improve logging in BarrierBuffer [FLINK-10016] - Make YARN/Kerberos end-to-end test 
stricter `}),e.add({id:229,href:"/2018/08/09/apache-flink-1.6.0-release-announcement/",title:"Apache Flink 1.6.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is proud to announce the 1.6.0 release. Over the past 2 months, the Flink community has worked hard to resolve more than 360 issues. Please check the complete changelog for more details.
+Sub-task [FLINK-9951] - Update scm developerConnection Bug [FLINK-5750] - Incorrect translation of n-ary Union [FLINK-9289] - Parallelism of generated operators should have max parallism of input [FLINK-9546] - The heartbeatTimeoutIntervalMs of HeartbeatMonitor should be larger than 0 [FLINK-9655] - Externalized checkpoint E2E test fails on travis [FLINK-9693] - Possible memory leak in jobmanager retaining archived checkpoints [FLINK-9694] - Potentially NPE in CompositeTypeSerializerConfigSnapshot constructor [FLINK-9923] - OneInputStreamTaskTest.testWatermarkMetrics fails on Travis [FLINK-9935] - Batch Table API: grouping by window and attribute causes java.lang.ClassCastException: [FLINK-9936] - Mesos resource manager unable to connect to master after failover [FLINK-9946] - Quickstart E2E test archetype version is hard-coded [FLINK-9969] - Unreasonable memory requirements to complete examples/batch/WordCount [FLINK-9972] - Debug memory logging not working [FLINK-9978] - Source release sha contains absolute file path [FLINK-9985] - Incorrect parameter order in document [FLINK-9988] - job manager does not respect property jobmanager.web.address [FLINK-10013] - Fix Kerberos integration for FLIP-6 YarnTaskExecutorRunner [FLINK-10033] - Let Task release reference to Invokable on shutdown [FLINK-10070] - Flink cannot be compiled with maven 3.0.x New Feature [FLINK-10022] - Add metrics for input/output buffers Improvement [FLINK-9446] - Compatibility table not up-to-date [FLINK-9765] - Improve CLI responsiveness when cluster is not reachable [FLINK-9806] - Add a canonical link element to documentation HTML [FLINK-9859] - More Akka config options [FLINK-9942] - Guard handlers against null fields in requests [FLINK-9986] - Remove unnecessary information from .version.properties file [FLINK-9987] - Rework ClassLoader E2E test to not rely on .version.properties file [FLINK-10006] - Improve logging in BarrierBuffer [FLINK-10016] - Make YARN/Kerberos end-to-end test 
stricter `}),e.add({id:230,href:"/2018/08/09/apache-flink-1.6.0-release-announcement/",title:"Apache Flink 1.6.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is proud to announce the 1.6.0 release. Over the past 2 months, the Flink community has worked hard to resolve more than 360 issues. Please check the complete changelog for more details.
 Flink 1.6.0 is the seventh major release in the 1.x.y series. It is API-compatible with previous 1.x.y releases for APIs annotated with the @Public annotation.
 We encourage everyone to download the release and check out the documentation. Feedback through the Flink mailing lists or JIRA is, as always, very much appreciated!
 You can find the binaries on the updated Downloads page on the Flink project site.
@@ -4518,19 +4528,19 @@
 Release Notes # Please review the release notes if you plan to upgrade your Flink setup to Flink 1.6.
 List of Contributors # According to git shortlog, the following 112 people contributed to the 1.6.0 release. Thanks to all contributors!
 Alejandro Alcalde, Alexander Koltsov, Alexey Tsitkin, Aljoscha Krettek, Andreas Fink, Andrey Zagrebin, Arunan Sugunakumar, Ashwin Sinha, Bill Lee, Bowen Li, Chesnay Schepler, Christophe Jolif, Clément Tamisier, Craig Foster, David Anderson, Dawid Wysakowicz, Deepak Sharnma, Dmitrii_Kniazev, EAlexRojas, Elias Levy, Eron Wright, Ethan Li, Fabian Hueske, Florian Schmidt, Franz Thoma, Gabor Gevay, Georgii Gobozov, Haohui Mai, Jamie Grier, Jeff Zhang, Jelmer Kuperus, Jiayi Liao, Jungtaek Lim, Kailash HD, Ken Geis, Ken Krugler, Lakshmi Gururaja Rao, Leonid Ishimnikov, Matrix42, Michael Gendelman, MichealShin, Moser Thomas W, Nico Duldhardt, Nico Kruber, Oleksandr Nitavskyi, PJ Fanning, Patrick Lucas, Pavel Shvetsov, Philippe Duveau, Piotr Nowojski, Qiu Congxian/klion26, Rinat Sharipov, Rong Rong, Rune Skou Larsen, Sayat Satybaldiyev, Shuyi Chen, Stefan Richter, Stephan Ewen, Stephen Parente, Thomas Weise, Till Rohrmann, Timo Walther, Tobii42, Tzu-Li (Gordon) Tai, Viktor Vlasov, Wosin, Xingcan Cui, Xpray, Yan Zhou, Yazdan.JS, Yun Tang, Zhijiang, Zsolt Donca, an4828, aria, binlijin, blueszheng, davidxdh, gyao, hequn8128, hzyuqi1, jerryjzhang, jparkie, juhoautio, kai-chi, kkloudas, klion26, lamber-ken, lincoln-lil, linjun, liurenjie1024, lsy, maqingxiang-it, maxbelov, mayyamus, minwenjun, neoremind, sampathBhat, shankarganesh1234, shuai.xus, sihuazhou, snuyanzin, triones.deng, vinoyang, xueyu, yangshimin, yuemeng, zhangminglei, zhouhai02, zjureel, 军长, 陈梓立
-`}),e.add({id:230,href:"/2018/07/31/apache-flink-1.5.2-released/",title:"Apache Flink 1.5.2 Released",section:"Flink Blog",content:`The Apache Flink community released the second bugfix version of the Apache Flink 1.5 series.
+`}),e.add({id:231,href:"/2018/07/31/apache-flink-1.5.2-released/",title:"Apache Flink 1.5.2 Released",section:"Flink Blog",content:`The Apache Flink community released the second bugfix version of the Apache Flink 1.5 series.
 This release includes more than 20 fixes and minor improvements for Flink 1.5.1. The list below includes a detailed list of all fixes.
 We highly recommend all users to upgrade to Flink 1.5.2.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.5.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.5.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.5.2&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-9839] - End-to-end test: Streaming job with SSL Bug [FLINK-5750] - Incorrect translation of n-ary Union [FLINK-8161] - Flakey YARNSessionCapacitySchedulerITCase on Travis [FLINK-8731] - TwoInputStreamTaskTest flaky on travis [FLINK-9091] - Failure while enforcing releasability in building flink-json module [FLINK-9380] - Failing end-to-end tests should not clean up logs [FLINK-9439] - DispatcherTest#testJobRecovery dead locks [FLINK-9575] - Potential race condition when removing JobGraph in HA [FLINK-9584] - Unclosed streams in Bucketing-/RollingSink [FLINK-9658] - Test data output directories are no longer cleaned up [FLINK-9706] - DispatcherTest#testSubmittedJobGraphListener fails on Travis [FLINK-9743] - PackagedProgram.extractContainedLibraries fails on Windows [FLINK-9754] - Release scripts refers to non-existing profile [FLINK-9755] - Exceptions in RemoteInputChannel#notifyBufferAvailable() are not propagated to the responsible thread [FLINK-9762] - CoreOptions.TMP_DIRS wrongly managed on Yarn [FLINK-9766] - Incomplete/incorrect cleanup in RemoteInputChannelTest [FLINK-9771] - &quot;Show Plan&quot; option under Submit New Job in WebUI not working [FLINK-9772] - Documentation of Hadoop API outdated [FLINK-9784] - Inconsistent use of &#39;static&#39; in AsyncIOExample.java [FLINK-9793] - When submitting a flink job with yarn-cluster, flink-dist*.jar is repeatedly uploaded [FLINK-9810] - JarListHandler does not close opened jars [FLINK-9838] - Slot request failed Exceptions after completing a job [FLINK-9841] - Web UI only show partial taskmanager log [FLINK-9842] - Job submission fails via CLI with SSL enabled [FLINK-9847] - OneInputStreamTaskTest.testWatermarksNotForwardedWithinChainWhenIdle unstable [FLINK-9857] - Processing-time timers fire too early [FLINK-9860] - Netty resource leak on receiver side [FLINK-9872] - SavepointITCase#testSavepointForJobWithIteration does not properly cancel jobs [FLINK-9908] - Inconsistent state of SlotPool 
after ExecutionGraph cancellation [FLINK-9910] - Non-queued scheduling failure sometimes does not return the slot [FLINK-9911] - SlotPool#failAllocation is called outside of main thread New Feature [FLINK-9499] - Allow REST API for running a job to provide job configuration as body of POST request Improvement [FLINK-9659] - Remove hard-coded sleeps in bucketing sink E2E test [FLINK-9748] - create_source_release pollutes flink root directory [FLINK-9768] - Only build flink-dist for binary releases [FLINK-9785] - Add remote addresses to LocalTransportException instances [FLINK-9801] - flink-dist is missing dependency on flink-examples [FLINK-9804] - KeyedStateBackend.getKeys() does not work on RocksDB MapState [FLINK-9811] - Add ITCase for interactions of Jar handlers [FLINK-9873] - Log actual state when aborting checkpoint due to task not running [FLINK-9881] - Typo in a function name in table.scala [FLINK-9888] - Remove unsafe defaults from release scripts [FLINK-9909] - Remove cancellation of input futures from ConjunctFutures `}),e.add({id:231,href:"/2018/07/12/apache-flink-1.5.1-released/",title:"Apache Flink 1.5.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix version of the Apache Flink 1.5 series.
+Sub-task [FLINK-9839] - End-to-end test: Streaming job with SSL Bug [FLINK-5750] - Incorrect translation of n-ary Union [FLINK-8161] - Flakey YARNSessionCapacitySchedulerITCase on Travis [FLINK-8731] - TwoInputStreamTaskTest flaky on travis [FLINK-9091] - Failure while enforcing releasability in building flink-json module [FLINK-9380] - Failing end-to-end tests should not clean up logs [FLINK-9439] - DispatcherTest#testJobRecovery dead locks [FLINK-9575] - Potential race condition when removing JobGraph in HA [FLINK-9584] - Unclosed streams in Bucketing-/RollingSink [FLINK-9658] - Test data output directories are no longer cleaned up [FLINK-9706] - DispatcherTest#testSubmittedJobGraphListener fails on Travis [FLINK-9743] - PackagedProgram.extractContainedLibraries fails on Windows [FLINK-9754] - Release scripts refers to non-existing profile [FLINK-9755] - Exceptions in RemoteInputChannel#notifyBufferAvailable() are not propagated to the responsible thread [FLINK-9762] - CoreOptions.TMP_DIRS wrongly managed on Yarn [FLINK-9766] - Incomplete/incorrect cleanup in RemoteInputChannelTest [FLINK-9771] - &quot;Show Plan&quot; option under Submit New Job in WebUI not working [FLINK-9772] - Documentation of Hadoop API outdated [FLINK-9784] - Inconsistent use of &#39;static&#39; in AsyncIOExample.java [FLINK-9793] - When submitting a flink job with yarn-cluster, flink-dist*.jar is repeatedly uploaded [FLINK-9810] - JarListHandler does not close opened jars [FLINK-9838] - Slot request failed Exceptions after completing a job [FLINK-9841] - Web UI only show partial taskmanager log [FLINK-9842] - Job submission fails via CLI with SSL enabled [FLINK-9847] - OneInputStreamTaskTest.testWatermarksNotForwardedWithinChainWhenIdle unstable [FLINK-9857] - Processing-time timers fire too early [FLINK-9860] - Netty resource leak on receiver side [FLINK-9872] - SavepointITCase#testSavepointForJobWithIteration does not properly cancel jobs [FLINK-9908] - Inconsistent state of SlotPool 
after ExecutionGraph cancellation [FLINK-9910] - Non-queued scheduling failure sometimes does not return the slot [FLINK-9911] - SlotPool#failAllocation is called outside of main thread New Feature [FLINK-9499] - Allow REST API for running a job to provide job configuration as body of POST request Improvement [FLINK-9659] - Remove hard-coded sleeps in bucketing sink E2E test [FLINK-9748] - create_source_release pollutes flink root directory [FLINK-9768] - Only build flink-dist for binary releases [FLINK-9785] - Add remote addresses to LocalTransportException instances [FLINK-9801] - flink-dist is missing dependency on flink-examples [FLINK-9804] - KeyedStateBackend.getKeys() does not work on RocksDB MapState [FLINK-9811] - Add ITCase for interactions of Jar handlers [FLINK-9873] - Log actual state when aborting checkpoint due to task not running [FLINK-9881] - Typo in a function name in table.scala [FLINK-9888] - Remove unsafe defaults from release scripts [FLINK-9909] - Remove cancellation of input futures from ConjunctFutures `}),e.add({id:232,href:"/2018/07/12/apache-flink-1.5.1-released/",title:"Apache Flink 1.5.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix version of the Apache Flink 1.5 series.
 This release includes more than 60 fixes and minor improvements for Flink 1.5.0. The list below includes a detailed list of all fixes.
 We highly recommend all users to upgrade to Flink 1.5.1.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.5.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.5.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.5.1&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-8977] - End-to-end test: Manually resume job after terminal failure [FLINK-8982] - End-to-end test: Queryable state [FLINK-8989] - End-to-end test: ElasticSearch connector [FLINK-8996] - Include an operator with broadcast and union state [FLINK-9008] - End-to-end test: Quickstarts [FLINK-9320] - Update \`test-ha.sh\` end-to-end test to use general purpose DataStream job [FLINK-9322] - Add exception throwing map function that simulates failures to the general purpose DataStream job [FLINK-9394] - Let externalized checkpoint resume e2e also test rescaling Bug [FLINK-8785] - JobSubmitHandler does not handle JobSubmissionExceptions [FLINK-8795] - Scala shell broken for Flip6 [FLINK-8946] - TaskManager stop sending metrics after JobManager failover [FLINK-9174] - The type of state created in ProccessWindowFunction.proccess() is inconsistency [FLINK-9215] - TaskManager Releasing - org.apache.flink.util.FlinkException [FLINK-9257] - End-to-end tests prints &quot;All tests PASS&quot; even if individual test-script returns non-zero exit code [FLINK-9258] - ConcurrentModificationException in ComponentMetricGroup.getAllVariables [FLINK-9326] - TaskManagerOptions.NUM_TASK_SLOTS does not work for local/embedded mode [FLINK-9374] - Flink Kinesis Producer does not backpressure [FLINK-9398] - Flink CLI list running job returns all jobs except in CREATE state [FLINK-9437] - Revert cypher suite update [FLINK-9458] - Unable to recover from job failure on YARN with NPE [FLINK-9467] - No Watermark display on Web UI [FLINK-9468] - Wrong calculation of outputLimit in LimitedConnectionsFileSystem [FLINK-9493] - Forward exception when releasing a TaskManager at the SlotPool [FLINK-9494] - Race condition in Dispatcher with concurrent granting and revoking of leaderhship [FLINK-9500] - FileUploadHandler does not handle EmptyLastHttpContent [FLINK-9524] - NPE from ProcTimeBoundedRangeOver.scala [FLINK-9530] - Task numRecords metrics broken for chains [FLINK-9554] - flink 
scala shell doesn&#39;t work in yarn mode [FLINK-9567] - Flink does not release resource in Yarn Cluster mode [FLINK-9570] - SQL Client merging environments uses AbstractMap [FLINK-9580] - Potentially unclosed ByteBufInputStream in RestClient#readRawResponse [FLINK-9627] - Extending &#39;KafkaJsonTableSource&#39; according to comments will result in NPE [FLINK-9629] - Datadog metrics reporter does not have shaded dependencies [FLINK-9633] - Flink doesn&#39;t use the Savepoint path&#39;s filesystem to create the OuptutStream on Task. [FLINK-9634] - Deactivate previous location based scheduling if local recovery is disabled [FLINK-9636] - Network buffer leaks in requesting a batch of segments during canceling [FLINK-9646] - ExecutionGraphCoLocationRestartTest.testConstraintsAfterRestart failed on Travis [FLINK-9654] - Internal error while deserializing custom Scala TypeSerializer instances [FLINK-9655] - Externalized checkpoint E2E test fails on travis [FLINK-9665] - PrometheusReporter does not properly unregister metrics [FLINK-9676] - Deadlock during canceling task and recycling exclusive buffer [FLINK-9677] - RestClient fails for large uploads [FLINK-9684] - HistoryServerArchiveFetcher not working properly with secure hdfs cluster [FLINK-9693] - Possible memory leak in jobmanager retaining archived checkpoints [FLINK-9708] - Network buffer leaks when buffer request fails during buffer redistribution [FLINK-9769] - FileUploads may be shared across requests [FLINK-9770] - UI jar list broken [FLINK-9789] - Watermark metrics for an operator&amp;task shadow each other New Feature [FLINK-9153] - TaskManagerRunner should support rpc port range [FLINK-9280] - Extend JobSubmitHandler to accept jar files [FLINK-9316] - Expose operator unique ID to the user defined functions in DataStream . 
[FLINK-9564] - Expose end-to-end module directory to test scripts [FLINK-9599] - Implement generic mechanism to receive files via rest [FLINK-9669] - Introduce task manager assignment store [FLINK-9670] - Introduce slot manager factory [FLINK-9671] - Add configuration to enable task manager isolation. Improvement [FLINK-4301] - Parameterize Flink version in Quickstart bash script [FLINK-8650] - Add tests and documentation for WINDOW clause [FLINK-8654] - Extend quickstart docs on how to submit jobs [FLINK-9109] - Add flink modify command to documentation [FLINK-9355] - Simplify configuration of local recovery to a simple on/off [FLINK-9372] - Typo on Elasticsearch website link (elastic.io --&gt; elastic.co) [FLINK-9409] - Remove flink-avro and flink-json from /opt [FLINK-9456] - Let ResourceManager notify JobManager about failed/killed TaskManagers [FLINK-9508] - General Spell Check on Flink Docs [FLINK-9517] - Fixing broken links on CLI and Upgrade Docs [FLINK-9518] - SSL setup Docs config example has wrong keys password [FLINK-9549] - Fix FlickCEP Docs broken link and minor style changes [FLINK-9573] - Check for leadership with leader session id [FLINK-9594] - Add documentation for e2e test changes introduced with FLINK-9257 [FLINK-9595] - Add instructions to docs about ceased support of KPL version used in Kinesis connector [FLINK-9638] - Add helper script to run single e2e test [FLINK-9672] - Fail fatally if we cannot submit job on added JobGraph signal [FLINK-9707] - LocalFileSystem does not support concurrent directory creations [FLINK-9729] - Duplicate lines for &quot;Weekday name (Sunday .. Saturday)&quot; [FLINK-9734] - Typo &#39;field-deleimiter&#39; in SQL client docs `}),e.add({id:232,href:"/2018/05/18/apache-flink-1.5.0-release-announcement/",title:"Apache Flink 1.5.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is thrilled to announce the 1.5.0 release. 
Over the past 5 months, the Flink community has been working hard to resolve more than 780 issues. Please check the complete changelog for more detail.
+Sub-task [FLINK-8977] - End-to-end test: Manually resume job after terminal failure [FLINK-8982] - End-to-end test: Queryable state [FLINK-8989] - End-to-end test: ElasticSearch connector [FLINK-8996] - Include an operator with broadcast and union state [FLINK-9008] - End-to-end test: Quickstarts [FLINK-9320] - Update \`test-ha.sh\` end-to-end test to use general purpose DataStream job [FLINK-9322] - Add exception throwing map function that simulates failures to the general purpose DataStream job [FLINK-9394] - Let externalized checkpoint resume e2e also test rescaling Bug [FLINK-8785] - JobSubmitHandler does not handle JobSubmissionExceptions [FLINK-8795] - Scala shell broken for Flip6 [FLINK-8946] - TaskManager stop sending metrics after JobManager failover [FLINK-9174] - The type of state created in ProccessWindowFunction.proccess() is inconsistency [FLINK-9215] - TaskManager Releasing - org.apache.flink.util.FlinkException [FLINK-9257] - End-to-end tests prints &quot;All tests PASS&quot; even if individual test-script returns non-zero exit code [FLINK-9258] - ConcurrentModificationException in ComponentMetricGroup.getAllVariables [FLINK-9326] - TaskManagerOptions.NUM_TASK_SLOTS does not work for local/embedded mode [FLINK-9374] - Flink Kinesis Producer does not backpressure [FLINK-9398] - Flink CLI list running job returns all jobs except in CREATE state [FLINK-9437] - Revert cypher suite update [FLINK-9458] - Unable to recover from job failure on YARN with NPE [FLINK-9467] - No Watermark display on Web UI [FLINK-9468] - Wrong calculation of outputLimit in LimitedConnectionsFileSystem [FLINK-9493] - Forward exception when releasing a TaskManager at the SlotPool [FLINK-9494] - Race condition in Dispatcher with concurrent granting and revoking of leaderhship [FLINK-9500] - FileUploadHandler does not handle EmptyLastHttpContent [FLINK-9524] - NPE from ProcTimeBoundedRangeOver.scala [FLINK-9530] - Task numRecords metrics broken for chains [FLINK-9554] - flink 
scala shell doesn&#39;t work in yarn mode [FLINK-9567] - Flink does not release resource in Yarn Cluster mode [FLINK-9570] - SQL Client merging environments uses AbstractMap [FLINK-9580] - Potentially unclosed ByteBufInputStream in RestClient#readRawResponse [FLINK-9627] - Extending &#39;KafkaJsonTableSource&#39; according to comments will result in NPE [FLINK-9629] - Datadog metrics reporter does not have shaded dependencies [FLINK-9633] - Flink doesn&#39;t use the Savepoint path&#39;s filesystem to create the OuptutStream on Task. [FLINK-9634] - Deactivate previous location based scheduling if local recovery is disabled [FLINK-9636] - Network buffer leaks in requesting a batch of segments during canceling [FLINK-9646] - ExecutionGraphCoLocationRestartTest.testConstraintsAfterRestart failed on Travis [FLINK-9654] - Internal error while deserializing custom Scala TypeSerializer instances [FLINK-9655] - Externalized checkpoint E2E test fails on travis [FLINK-9665] - PrometheusReporter does not properly unregister metrics [FLINK-9676] - Deadlock during canceling task and recycling exclusive buffer [FLINK-9677] - RestClient fails for large uploads [FLINK-9684] - HistoryServerArchiveFetcher not working properly with secure hdfs cluster [FLINK-9693] - Possible memory leak in jobmanager retaining archived checkpoints [FLINK-9708] - Network buffer leaks when buffer request fails during buffer redistribution [FLINK-9769] - FileUploads may be shared across requests [FLINK-9770] - UI jar list broken [FLINK-9789] - Watermark metrics for an operator&amp;task shadow each other New Feature [FLINK-9153] - TaskManagerRunner should support rpc port range [FLINK-9280] - Extend JobSubmitHandler to accept jar files [FLINK-9316] - Expose operator unique ID to the user defined functions in DataStream . 
[FLINK-9564] - Expose end-to-end module directory to test scripts [FLINK-9599] - Implement generic mechanism to receive files via rest [FLINK-9669] - Introduce task manager assignment store [FLINK-9670] - Introduce slot manager factory [FLINK-9671] - Add configuration to enable task manager isolation. Improvement [FLINK-4301] - Parameterize Flink version in Quickstart bash script [FLINK-8650] - Add tests and documentation for WINDOW clause [FLINK-8654] - Extend quickstart docs on how to submit jobs [FLINK-9109] - Add flink modify command to documentation [FLINK-9355] - Simplify configuration of local recovery to a simple on/off [FLINK-9372] - Typo on Elasticsearch website link (elastic.io --&gt; elastic.co) [FLINK-9409] - Remove flink-avro and flink-json from /opt [FLINK-9456] - Let ResourceManager notify JobManager about failed/killed TaskManagers [FLINK-9508] - General Spell Check on Flink Docs [FLINK-9517] - Fixing broken links on CLI and Upgrade Docs [FLINK-9518] - SSL setup Docs config example has wrong keys password [FLINK-9549] - Fix FlickCEP Docs broken link and minor style changes [FLINK-9573] - Check for leadership with leader session id [FLINK-9594] - Add documentation for e2e test changes introduced with FLINK-9257 [FLINK-9595] - Add instructions to docs about ceased support of KPL version used in Kinesis connector [FLINK-9638] - Add helper script to run single e2e test [FLINK-9672] - Fail fatally if we cannot submit job on added JobGraph signal [FLINK-9707] - LocalFileSystem does not support concurrent directory creations [FLINK-9729] - Duplicate lines for &quot;Weekday name (Sunday .. Saturday)&quot; [FLINK-9734] - Typo &#39;field-deleimiter&#39; in SQL client docs `}),e.add({id:233,href:"/2018/05/18/apache-flink-1.5.0-release-announcement/",title:"Apache Flink 1.5.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is thrilled to announce the 1.5.0 release. 
Over the past 5 months, the Flink community has been working hard to resolve more than 780 issues. Please check the complete changelog for more detail.
 Flink 1.5.0 is the sixth major release in the 1.x.y series. As usual, it is API-compatible with previous 1.x.y releases for APIs annotated with the @Public annotation.
 We encourage everyone to download the release and check out the documentation. Feedback through the Flink mailing lists or JIRA is, as always, very much appreciated!
 You can find the binaries on the updated Downloads page on the Flink project site.
@@ -4557,19 +4567,19 @@
 Various Other Features and Improvements # OpenStack provides software for creating public and private clouds on pools of resources. Flink now supports OpenStack’s S3-like file system, Swift, for checkpoint and savepoint storage. Swift can be used without Hadoop dependencies. Reading and writing JSON messages from and to connectors has been improved. It’s now possible to parse a standard JSON schema in order to configure serializers and deserializers. The SQL CLI Client is able to read JSON records from Kafka. Applications can be rescaled without manually triggering a savepoint. Under the hood, Flink will still take a savepoint, stop the application, and rescale it to the new parallelism. Improved metrics for watermarks and latency. Flink now reports the minimum watermark in all operators, including sources. Moreover, the latency metrics were reworked for better integration with common metrics systems. The FileInputFormat (and many derived input formats) now supports reading files from multiple paths. The BucketingSink supports the specification of custom extensions for multiple parts. The CassandraOutputFormat can be used to emit Row objects. The Kinesis consumer allows for more customization. Release Notes # Please review the release notes if you plan to upgrade your Flink setup to Flink 1.5.
 List of Contributors # According to git shortlog, the following 106 people contributed to the 1.5.0 release. Thanks to all contributors!
 Aegeaner, Alejandro Alcalde, Aljoscha Krettek, Andreas Fink, Andrey Zagrebin, Ankit Parashar, Arunan Sugunakumar, Bartłomiej Tartanus, Bowen Li, Cristian, Dan Kelley, David Anderson, Dawid Wysakowicz, Dian Fu, Dmitrii_Kniazev, Dyana Rose, EAlexRojas, Eron Wright, Fabian Hueske, Florian Schmidt, Gabor Gevay, Greg Hogan, Gyula Fora, Jark Wu, Jelmer Kuperus, Joerg Schad, John Eismeier, Kailash HD, Ken Geis, Ken Krugler, Kent Murra, Leonid Ishimnikov, Malcolm Taylor, Matrix42, Michael Fong, Michael Gendelman, Moser Thomas W, Nico Kruber, PJ Fanning, Patrick Lucas, Pavel Shvetsov, Phetsarath, Sourigna, Philip Luppens, Piotr Nowojski, Qiu Congxian/klion26, Razvan, Robert Metzger, Rong Rong, Shuyi Chen, Stefan Richter, Stephan Ewen, Stephen Parente, Steven Langbroek, Thomas Weise, Till Rohrmann, Timo Walther, Tony Wei, Tzu-Li (Gordon) Tai, Ufuk Celebi, Vetriselvan1187, Xingcan Cui, Xpray, Yazdan.JS, Zhijiang, Zohar Mizrahi, aria, biao.liub, binlijin, davidxdh, eastcirclek, eskabetxe, gyao, hequn8128, hzyuqi1, ifndef-SleePy, jparkie, juhoautio, kkloudas, maqingxiang-it, maxbelov, mayyamus, mingleiZhang, neoremind, nichuanlei, okumin, shankarganesh1234, shuai.xus, sihuazhou, summerleafs, sunjincheng121, triones.deng, twalthr, uybhatti, vinoyang, wenlong.lwl, yanghua, yew1eb, yuemeng, zentol, zhangminglei, zhouhai02, zjureel, 军长, 金竹, 王振涛, 陈梓立
-`}),e.add({id:233,href:"/2018/03/15/apache-flink-1.3.3-released/",title:"Apache Flink 1.3.3 Released",section:"Flink Blog",content:`The Apache Flink community released the third bugfix version of the Apache Flink 1.3 series.
+`}),e.add({id:234,href:"/2018/03/15/apache-flink-1.3.3-released/",title:"Apache Flink 1.3.3 Released",section:"Flink Blog",content:`The Apache Flink community released the third bugfix version of the Apache Flink 1.3 series.
 This release includes 4 critical fixes related to checkpointing and recovery. The list below includes a detailed list of all fixes.
 We highly recommend all Flink 1.3 series users to upgrade to Flink 1.3.3.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.3.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.10&lt;/artifactId&gt; &lt;version&gt;1.3.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.10&lt;/artifactId&gt; &lt;version&gt;1.3.3&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-7783] - Don&#39;t always remove checkpoints in ZooKeeperCompletedCheckpointStore#recover() Bug [FLINK-7283] - PythonPlanBinderTest issues with python paths [FLINK-8487] - State loss after multiple restart attempts [FLINK-8807] - ZookeeperCompleted checkpoint store can get stuck in infinite loop Improvement [FLINK-8890] - Compare checkpoints with order in CompletedCheckpoint.checkpointsMatch() `}),e.add({id:234,href:"/2018/03/08/apache-flink-1.4.2-released/",title:"Apache Flink 1.4.2 Released",section:"Flink Blog",content:`The Apache Flink community released the second bugfix version of the Apache Flink 1.4 series.
+Sub-task [FLINK-7783] - Don&#39;t always remove checkpoints in ZooKeeperCompletedCheckpointStore#recover() Bug [FLINK-7283] - PythonPlanBinderTest issues with python paths [FLINK-8487] - State loss after multiple restart attempts [FLINK-8807] - ZookeeperCompleted checkpoint store can get stuck in infinite loop Improvement [FLINK-8890] - Compare checkpoints with order in CompletedCheckpoint.checkpointsMatch() `}),e.add({id:235,href:"/2018/03/08/apache-flink-1.4.2-released/",title:"Apache Flink 1.4.2 Released",section:"Flink Blog",content:`The Apache Flink community released the second bugfix version of the Apache Flink 1.4 series.
 This release includes more than 10 fixes and minor improvements for Flink 1.4.1. The list below includes a detailed list of all fixes.
 We highly recommend all users to upgrade to Flink 1.4.2.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.4.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.4.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.4.2&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-6321] - RocksDB state backend Checkpointing is not working with KeyedCEP. [FLINK-7756] - RocksDB state backend Checkpointing (Async and Incremental) is not working with CEP. Bug [FLINK-8423] - OperatorChain#pushToOperator catch block may fail with NPE [FLINK-8451] - CaseClassSerializer is not backwards compatible in 1.4 [FLINK-8520] - CassandraConnectorITCase.testCassandraTableSink unstable on Travis [FLINK-8621] - PrometheusReporterTest.endpointIsUnavailableAfterReporterIsClosed unstable on Travis [FLINK-8692] - Mistake in MyMapFunction code snippet [FLINK-8735] - Add savepoint migration ITCase that covers operator state [FLINK-8741] - KafkaFetcher09/010/011 uses wrong user code classloader [FLINK-8772] - FlinkKafkaConsumerBase partitions discover missing a log parameter [FLINK-8791] - Fix documentation on how to link dependencies [FLINK-8798] - Make commons-logging a parent-first pattern [FLINK-8849] - Wrong link from concepts/runtime to doc on chaining Improvement [FLINK-8202] - Update queryable section on configuration page [FLINK-8574] - Add timestamps to travis logging messages [FLINK-8576] - Log message for QueryableState loading failure too verbose [FLINK-8652] - Reduce log level of QueryableStateClient.getKvState() to DEBUG Task [FLINK-8308] - Update yajl-ruby dependency to 1.3.1 or higher `}),e.add({id:235,href:"/2018/02/28/an-overview-of-end-to-end-exactly-once-processing-in-apache-flink-with-apache-kafka-too/",title:"An Overview of End-to-End Exactly-Once Processing in Apache Flink (with Apache Kafka, too!)",section:"Flink Blog",content:`This post is an adaptation of Piotr Nowojski&rsquo;s presentation from Flink Forward Berlin 2017. You can find the slides and a recording of the presentation on the Flink Forward Berlin website.
+Sub-task [FLINK-6321] - RocksDB state backend Checkpointing is not working with KeyedCEP. [FLINK-7756] - RocksDB state backend Checkpointing (Async and Incremental) is not working with CEP. Bug [FLINK-8423] - OperatorChain#pushToOperator catch block may fail with NPE [FLINK-8451] - CaseClassSerializer is not backwards compatible in 1.4 [FLINK-8520] - CassandraConnectorITCase.testCassandraTableSink unstable on Travis [FLINK-8621] - PrometheusReporterTest.endpointIsUnavailableAfterReporterIsClosed unstable on Travis [FLINK-8692] - Mistake in MyMapFunction code snippet [FLINK-8735] - Add savepoint migration ITCase that covers operator state [FLINK-8741] - KafkaFetcher09/010/011 uses wrong user code classloader [FLINK-8772] - FlinkKafkaConsumerBase partitions discover missing a log parameter [FLINK-8791] - Fix documentation on how to link dependencies [FLINK-8798] - Make commons-logging a parent-first pattern [FLINK-8849] - Wrong link from concepts/runtime to doc on chaining Improvement [FLINK-8202] - Update queryable section on configuration page [FLINK-8574] - Add timestamps to travis logging messages [FLINK-8576] - Log message for QueryableState loading failure too verbose [FLINK-8652] - Reduce log level of QueryableStateClient.getKvState() to DEBUG Task [FLINK-8308] - Update yajl-ruby dependency to 1.3.1 or higher `}),e.add({id:236,href:"/2018/02/28/an-overview-of-end-to-end-exactly-once-processing-in-apache-flink-with-apache-kafka-too/",title:"An Overview of End-to-End Exactly-Once Processing in Apache Flink (with Apache Kafka, too!)",section:"Flink Blog",content:`This post is an adaptation of Piotr Nowojski&rsquo;s presentation from Flink Forward Berlin 2017. You can find the slides and a recording of the presentation on the Flink Forward Berlin website.
 Apache Flink 1.4.0, released in December 2017, introduced a significant milestone for stream processing with Flink: a new feature called TwoPhaseCommitSinkFunction (relevant Jira here) that extracts the common logic of the two-phase commit protocol and makes it possible to build end-to-end exactly-once applications with Flink and a selection of data sources and sinks, including Apache Kafka versions 0.11 and beyond. It provides a layer of abstraction and requires a user to implement only a handful of methods to achieve end-to-end exactly-once semantics.
 If that&rsquo;s all you need to hear, let us point you to the relevant place in the Flink documentation, where you can read about how to put TwoPhaseCommitSinkFunction to use.
 But if you&rsquo;d like to learn more, in this post, we&rsquo;ll share an in-depth overview of the new feature and what is happening behind the scenes in Flink.
@@ -4608,13 +4618,13 @@
 Wrapping Up # If you&rsquo;ve made it this far, thanks for staying with us through a detailed post. Here are some key points that we covered:
 Flink&rsquo;s checkpointing system serves as Flink&rsquo;s basis for supporting a two-phase commit protocol and providing end-to-end exactly-once semantics. An advantage of this approach is that Flink does not materialize data in transit the way that some other systems do&ndash;there&rsquo;s no need to write every stage of the computation to disk as is the case is most batch processing. Flink&rsquo;s new TwoPhaseCommitSinkFunction extracts the common logic of the two-phase commit protocol and makes it possible to build end-to-end exactly-once applications with Flink and external systems that support transactions Starting with Flink 1.4.0, both the Pravega and Kafka 0.11 producers provide exactly-once semantics; Kafka introduced transactions for the first time in Kafka 0.11, which is what made the Kafka exactly-once producer possible in Flink. The Kafka 0.11 producer is implemented on top of the TwoPhaseCommitSinkFunction, and it offers very low overhead compared to the at-least-once Kafka producer. We&rsquo;re very excited about what this new feature enables, and we look forward to being able to support additional producers with the TwoPhaseCommitSinkFunction in the future.
 This post first appeared on the data Artisans blog and was contributed to Apache Flink and the Flink blog by the original authors Piotr Nowojski and Mike Winters.
-`}),e.add({id:236,href:"/2018/02/15/apache-flink-1.4.1-released/",title:"Apache Flink 1.4.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix version of the Apache Flink 1.4 series.
+`}),e.add({id:237,href:"/2018/02/15/apache-flink-1.4.1-released/",title:"Apache Flink 1.4.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix version of the Apache Flink 1.4 series.
 This release includes more than 60 fixes and minor improvements for Flink 1.4.0. The list below includes a detailed list of all fixes.
 We highly recommend all users to upgrade to Flink 1.4.1.
 Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.4.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.4.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.4.1&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-6321] - RocksDB state backend Checkpointing is not working with KeyedCEP. [FLINK-7499] - double buffer release in SpillableSubpartitionView [FLINK-7756] - RocksDB state backend Checkpointing (Async and Incremental) is not working with CEP. [FLINK-7760] - Restore failing from external checkpointing metadata. [FLINK-8323] - Fix Mod scala function bug Bug [FLINK-5506] - Java 8 - CommunityDetection.java:158 - java.lang.NullPointerException [FLINK-6951] - Incompatible versions of httpcomponents jars for Flink kinesis connector [FLINK-7949] - AsyncWaitOperator is not restarting when queue is full [FLINK-8145] - IOManagerAsync not properly shut down in various tests [FLINK-8200] - RocksDBAsyncSnapshotTest should use temp fold instead of fold with fixed name [FLINK-8226] - Dangling reference generated after NFA clean up timed out SharedBufferEntry [FLINK-8230] - NPE in OrcRowInputFormat on nested structs [FLINK-8235] - Cannot run spotbugs for single module [FLINK-8242] - ClassCastException in OrcTableSource.toOrcPredicate [FLINK-8248] - RocksDB state backend Checkpointing is not working with KeyedCEP in 1.4 [FLINK-8249] - Kinesis Producer didnt configure region [FLINK-8261] - Typos in the shading exclusion for jsr305 in the quickstarts [FLINK-8263] - Wrong packaging of flink-core in scala quickstarty [FLINK-8265] - Missing jackson dependency for flink-mesos [FLINK-8270] - TaskManagers do not use correct local path for shipped Keytab files in Yarn deployment modes [FLINK-8275] - Flink YARN deployment with Kerberos enabled not working [FLINK-8278] - Scala examples in Metric documentation do not compile [FLINK-8283] - FlinkKafkaConsumerBase failing on Travis with no output in 10min [FLINK-8295] - Netty shading does not work properly [FLINK-8306] - FlinkKafkaConsumerBaseTest has invalid mocks on final methods [FLINK-8318] - Conflict jackson library with ElasticSearch connector [FLINK-8325] - Add COUNT AGG support constant parameter, i.e. 
COUNT(*), COUNT(1) [FLINK-8352] - Flink UI Reports No Error on Job Submission Failures [FLINK-8355] - DataSet Should not union a NULL row for AGG without GROUP BY clause. [FLINK-8371] - Buffers are not recycled in a non-spilled SpillableSubpartition upon release [FLINK-8398] - Stabilize flaky KinesisDataFetcherTests [FLINK-8406] - BucketingSink does not detect hadoop file systems [FLINK-8409] - Race condition in KafkaConsumerThread leads to potential NPE [FLINK-8419] - Kafka consumer&#39;s offset metrics are not registered for dynamically discovered partitions [FLINK-8421] - HeapInternalTimerService should reconfigure compatible key / namespace serializers on restore [FLINK-8433] - Update code example for &quot;Managed Operator State&quot; documentation [FLINK-8461] - Wrong logger configurations for shaded Netty [FLINK-8466] - ErrorInfo needs to hold Exception as SerializedThrowable [FLINK-8484] - Kinesis consumer re-reads closed shards on job restart [FLINK-8485] - Running Flink inside Intellij no longer works after upgrading from 1.3.2 to 1.4.0 [FLINK-8489] - Data is not emitted by second ElasticSearch connector [FLINK-8496] - WebUI does not display TM MemorySegment metrics [FLINK-8499] - Kryo must not be child-first loaded [FLINK-8522] - DefaultOperatorStateBackend writes data in checkpoint that is never read. [FLINK-8559] - Exceptions in RocksDBIncrementalSnapshotOperation#takeSnapshot cause job to get stuck [FLINK-8561] - SharedBuffer line 573 uses == to compare BufferEntries instead of .equals. Improvement [FLINK-8079] - Skip remaining E2E tests if one failed [FLINK-8202] - Update queryable section on configuration page [FLINK-8243] - OrcTableSource should recursively read all files in nested directories of the input path. 
[FLINK-8260] - Document API of Kafka 0.11 Producer [FLINK-8264] - Add Scala to the parent-first loading patterns [FLINK-8271] - upgrade from deprecated classes to AmazonKinesis [FLINK-8287] - Flink Kafka Producer docs should clearly state what partitioner is used by default [FLINK-8296] - Rework FlinkKafkaConsumerBestTest to not use Java reflection for dependency injection [FLINK-8346] - add S3 signature v4 workaround to docs [FLINK-8362] - Shade Elasticsearch dependencies away [FLINK-8455] - Add Hadoop to the parent-first loading patterns [FLINK-8473] - JarListHandler may fail with NPE if directory is deleted [FLINK-8571] - Provide an enhanced KeyedStream implementation to use ForwardPartitioner Test [FLINK-8472] - Extend migration tests for Flink 1.4 `}),e.add({id:237,href:"/2018/01/30/managing-large-state-in-apache-flink-an-intro-to-incremental-checkpointing/",title:"Managing Large State in Apache Flink: An Intro to Incremental Checkpointing",section:"Flink Blog",content:`Apache Flink was purpose-built for stateful stream processing. However, what is state in a stream processing application? I defined state and stateful stream processing in a previous blog post, and in case you need a refresher, state is defined as memory in an application&rsquo;s operators that stores information about previously-seen events that you can use to influence the processing of future events.
+Sub-task [FLINK-6321] - RocksDB state backend Checkpointing is not working with KeyedCEP. [FLINK-7499] - double buffer release in SpillableSubpartitionView [FLINK-7756] - RocksDB state backend Checkpointing (Async and Incremental) is not working with CEP. [FLINK-7760] - Restore failing from external checkpointing metadata. [FLINK-8323] - Fix Mod scala function bug Bug [FLINK-5506] - Java 8 - CommunityDetection.java:158 - java.lang.NullPointerException [FLINK-6951] - Incompatible versions of httpcomponents jars for Flink kinesis connector [FLINK-7949] - AsyncWaitOperator is not restarting when queue is full [FLINK-8145] - IOManagerAsync not properly shut down in various tests [FLINK-8200] - RocksDBAsyncSnapshotTest should use temp fold instead of fold with fixed name [FLINK-8226] - Dangling reference generated after NFA clean up timed out SharedBufferEntry [FLINK-8230] - NPE in OrcRowInputFormat on nested structs [FLINK-8235] - Cannot run spotbugs for single module [FLINK-8242] - ClassCastException in OrcTableSource.toOrcPredicate [FLINK-8248] - RocksDB state backend Checkpointing is not working with KeyedCEP in 1.4 [FLINK-8249] - Kinesis Producer didnt configure region [FLINK-8261] - Typos in the shading exclusion for jsr305 in the quickstarts [FLINK-8263] - Wrong packaging of flink-core in scala quickstarty [FLINK-8265] - Missing jackson dependency for flink-mesos [FLINK-8270] - TaskManagers do not use correct local path for shipped Keytab files in Yarn deployment modes [FLINK-8275] - Flink YARN deployment with Kerberos enabled not working [FLINK-8278] - Scala examples in Metric documentation do not compile [FLINK-8283] - FlinkKafkaConsumerBase failing on Travis with no output in 10min [FLINK-8295] - Netty shading does not work properly [FLINK-8306] - FlinkKafkaConsumerBaseTest has invalid mocks on final methods [FLINK-8318] - Conflict jackson library with ElasticSearch connector [FLINK-8325] - Add COUNT AGG support constant parameter, i.e. 
COUNT(*), COUNT(1) [FLINK-8352] - Flink UI Reports No Error on Job Submission Failures [FLINK-8355] - DataSet Should not union a NULL row for AGG without GROUP BY clause. [FLINK-8371] - Buffers are not recycled in a non-spilled SpillableSubpartition upon release [FLINK-8398] - Stabilize flaky KinesisDataFetcherTests [FLINK-8406] - BucketingSink does not detect hadoop file systems [FLINK-8409] - Race condition in KafkaConsumerThread leads to potential NPE [FLINK-8419] - Kafka consumer&#39;s offset metrics are not registered for dynamically discovered partitions [FLINK-8421] - HeapInternalTimerService should reconfigure compatible key / namespace serializers on restore [FLINK-8433] - Update code example for &quot;Managed Operator State&quot; documentation [FLINK-8461] - Wrong logger configurations for shaded Netty [FLINK-8466] - ErrorInfo needs to hold Exception as SerializedThrowable [FLINK-8484] - Kinesis consumer re-reads closed shards on job restart [FLINK-8485] - Running Flink inside Intellij no longer works after upgrading from 1.3.2 to 1.4.0 [FLINK-8489] - Data is not emitted by second ElasticSearch connector [FLINK-8496] - WebUI does not display TM MemorySegment metrics [FLINK-8499] - Kryo must not be child-first loaded [FLINK-8522] - DefaultOperatorStateBackend writes data in checkpoint that is never read. [FLINK-8559] - Exceptions in RocksDBIncrementalSnapshotOperation#takeSnapshot cause job to get stuck [FLINK-8561] - SharedBuffer line 573 uses == to compare BufferEntries instead of .equals. Improvement [FLINK-8079] - Skip remaining E2E tests if one failed [FLINK-8202] - Update queryable section on configuration page [FLINK-8243] - OrcTableSource should recursively read all files in nested directories of the input path. 
[FLINK-8260] - Document API of Kafka 0.11 Producer [FLINK-8264] - Add Scala to the parent-first loading patterns [FLINK-8271] - upgrade from deprecated classes to AmazonKinesis [FLINK-8287] - Flink Kafka Producer docs should clearly state what partitioner is used by default [FLINK-8296] - Rework FlinkKafkaConsumerBestTest to not use Java reflection for dependency injection [FLINK-8346] - add S3 signature v4 workaround to docs [FLINK-8362] - Shade Elasticsearch dependencies away [FLINK-8455] - Add Hadoop to the parent-first loading patterns [FLINK-8473] - JarListHandler may fail with NPE if directory is deleted [FLINK-8571] - Provide an enhanced KeyedStream implementation to use ForwardPartitioner Test [FLINK-8472] - Extend migration tests for Flink 1.4 `}),e.add({id:238,href:"/2018/01/30/managing-large-state-in-apache-flink-an-intro-to-incremental-checkpointing/",title:"Managing Large State in Apache Flink: An Intro to Incremental Checkpointing",section:"Flink Blog",content:`Apache Flink was purpose-built for stateful stream processing. However, what is state in a stream processing application? I defined state and stateful stream processing in a previous blog post, and in case you need a refresher, state is defined as memory in an application&rsquo;s operators that stores information about previously-seen events that you can use to influence the processing of future events.
 State is a fundamental, enabling concept in stream processing required for a majority of complex use cases. Some examples highlighted in the Flink documentation:
 When an application searches for certain event patterns, the state stores the sequence of events encountered so far. When aggregating events per minute, the state holds the pending aggregates. When training a machine learning model over a stream of data points, the state holds the current version of the model parameters. However, stateful stream processing is only useful in production environments if the state is fault tolerant. &ldquo;Fault tolerance&rdquo; means that even if there&rsquo;s a software or machine failure, the computed end-result is accurate, with no data loss or double-counting of events.
 Flink&rsquo;s fault tolerance has always been a powerful and popular feature, minimizing the impact of software or machine failure on your business and making it possible to guarantee exactly-once results from a Flink application.
@@ -4640,7 +4650,7 @@
 Though the feature can lead to a substantial improvement in checkpoint time for users with a large state, there are trade-offs to consider with incremental checkpointing. Overall, the process reduces the checkpointing time during normal operations but can lead to a longer recovery time depending on the size of your state. If the cluster failure is particularly severe and the Flink TaskManagers have to read from multiple checkpoints, recovery can be a slower operation than when using non-incremental checkpointing. You can also no longer delete old checkpoints as newer checkpoints need them, and the history of differences between checkpoints can grow indefinitely over time. You need to plan for larger distributed storage to maintain the checkpoints and the network overhead to read from it.
 There are some strategies for improving the convenience/performance trade-off, and I recommend you read the Flink documentation for more details.
 This post originally appeared on the data Artisans blog and was contributed to the Flink blog by Stefan Richter and Chris Ward.
-`}),e.add({id:238,href:"/2017/12/21/apache-flink-in-2017-year-in-review/",title:"Apache Flink in 2017: Year in Review",section:"Flink Blog",content:`2017 was another exciting year for the Apache Flink® community, with 3 major version releases (Flink 1.2.0 in February, Flink 1.3.0 in June, and Flink 1.4.0 in December) and the first-ever Flink Forward in San Francisco, giving Flink community members in another corner of the globe an opportunity to connect. Users shared details about their innovative production deployments, redefining what is possible with a modern stream processing framework like Flink.
+`}),e.add({id:239,href:"/2017/12/21/apache-flink-in-2017-year-in-review/",title:"Apache Flink in 2017: Year in Review",section:"Flink Blog",content:`2017 was another exciting year for the Apache Flink® community, with 3 major version releases (Flink 1.2.0 in February, Flink 1.3.0 in June, and Flink 1.4.0 in December) and the first-ever Flink Forward in San Francisco, giving Flink community members in another corner of the globe an opportunity to connect. Users shared details about their innovative production deployments, redefining what is possible with a modern stream processing framework like Flink.
 In this post, we&rsquo;ll look back on the project&rsquo;s progress over the course of 2017, and we&rsquo;ll also preview what 2018 has in store.
 {%toc%}
 Community Growth # Github # First, here&rsquo;s a summary of community statistics from GitHub. At the time of writing:
@@ -4668,7 +4678,7 @@
 Work is already underway on a number of these features, and some will be included in Flink 1.5 at the beginning of 2018.
 Improved BLOB storage architecture, as described in FLIP-19 to consolidate API usage and improve concurrency. Integration of SQL and CEP, as described in FLIP-20 to allow developers to create complex event processing (CEP) patterns using SQL statements. Unified checkpoints and savepoints, as described in FLIP-10, to allow savepoints to be triggered automatically–important for program updates for the sake of error handling because savepoints allow the user to modify both the job and Flink version whereas checkpoints can only be recovered with the same job. An improved Flink deployment and process model, as described in FLIP-6, to allow for better integration with Flink and cluster managers and deployment technologies such as Mesos, Docker, and Kubernetes. Fine-grained recovery from task failures, as described in FLIP-1 to improve recovery efficiency and only re-execute failed tasks, reducing the amount of state that Flink needs to transfer on recovery. An SQL Client, as described in FLIP-24 to add a service and a client to execute SQL queries against batch and streaming tables. Serving of machine learning models, as described in FLIP-23 to add a library that allows users to apply offline-trained machine learning models to data streams. If you&rsquo;re interested in getting involved with Flink, we encourage you to take a look at the FLIPs and to join the discussion via the Flink mailing lists.
 Lastly, we&rsquo;d like to extend a sincere thank you to all the Flink community for making 2017 a great year!
-`}),e.add({id:239,href:"/2017/12/12/apache-flink-1.4.0-release-announcement/",title:"Apache Flink 1.4.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is pleased to announce the 1.4.0 release. Over the past 5 months, the Flink community has been working hard to resolve more than 900 issues. See the complete changelog for more detail.
+`}),e.add({id:240,href:"/2017/12/12/apache-flink-1.4.0-release-announcement/",title:"Apache Flink 1.4.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is pleased to announce the 1.4.0 release. Over the past 5 months, the Flink community has been working hard to resolve more than 900 issues. See the complete changelog for more detail.
 This is the fifth major release in the 1.x.y series. It is API-compatible with the other 1.x.y releases for APIs annotated with the @Public annotation.
 We encourage everyone to download the release and check out the documentation.
 Feedback through the Flink mailing lists is, as always, gladly encouraged!
@@ -4713,7 +4723,7 @@
 Bundled S3 FileSystems # Flink 1.4 comes bundled with two different S3 FileSystems based on the Presto S3 FileSystem and the Hadoop S3A FileSystem. They don&rsquo;t have dependencies (because all dependencies are shaded/relocated) and you can use them by dropping the respective file from the opt directory into the lib directory of your Flink installation. For more information about this, please refer to the documentation.
 List of Contributors # According to git shortlog, the following 106 people contributed to the 1.4.0 release. Thank you to all contributors!
 Ajay Tripathy, Alejandro Alcalde, Aljoscha Krettek, Bang, Phiradet, Bowen Li, Chris Ward, Cristian, Dan Kelley, David Anderson, Dawid Wysakowicz, Dian Fu, Dmitrii Kniazev, DmytroShkvyra, Fabian Hueske, FlorianFan, Fokko Driesprong, Gabor Gevay, Gary Yao, Greg Hogan, Haohui Mai, Hequn Cheng, James Lafa, Jark Wu, Jie Shen, Jing Fan, JingsongLi, Joerg Schad, Juan Paulo Gutierrez, Ken Geis, Kent Murra, Kurt Young, Lim Chee Hau, Maximilian Bode, Michael Fong, Mike Kobit, Mikhail Lipkovich, Nico Kruber, Novotnik, Petr, Nycholas de Oliveira e Oliveira, Patrick Lucas, Piotr Nowojski, Robert Metzger, Rodrigo Bonifacio, Rong Rong, Scott Kidder, Sebastian Klemke, Shuyi Chen, Stefan Richter, Stephan Ewen, Svend Vanderveken, Till Rohrmann, Tony Wei, Tzu-Li (Gordon) Tai, Ufuk Celebi, Usman Younas, Vetriselvan1187, Vishnu Viswanath, Wright, Eron, Xingcan Cui, Xpray, Yestin, Yonatan Most, Zhenzhong Xu, Zhijiang, adebski, asdf2014, bbayani, biao.liub, cactuslrd.lird, dawidwys, desktop, fengyelei, godfreyhe, gosubpl, gyao, hongyuhong, huafengw, kkloudas, kl0u, lincoln-lil, lingjinjiang, mengji.fy, minwenjun, mtunique, p1tz, paul, rtudoran, shaoxuan-wang, sirko bretschneider, sunjincheng121, tedyu, twalthr, uybhatti, wangmiao1981, yew1eb, z00376786, zentol, zhangminglei, zhe li, zhouhai02, zjureel, 付典, 军长, 宝牛, 淘江, 金竹
-`}),e.add({id:240,href:"/2017/11/21/looking-ahead-to-apache-flink-1.4.0-and-1.5.0/",title:"Looking Ahead to Apache Flink 1.4.0 and 1.5.0",section:"Flink Blog",content:`The Apache Flink 1.4.0 release is on track to happen in the next couple of weeks, and for all of the readers out there who haven’t been following the release discussion on Flink’s developer mailing list, we’d like to provide some details on what’s coming in Flink 1.4.0 as well as a preview of what the Flink community will save for 1.5.0.
+`}),e.add({id:241,href:"/2017/11/21/looking-ahead-to-apache-flink-1.4.0-and-1.5.0/",title:"Looking Ahead to Apache Flink 1.4.0 and 1.5.0",section:"Flink Blog",content:`The Apache Flink 1.4.0 release is on track to happen in the next couple of weeks, and for all of the readers out there who haven’t been following the release discussion on Flink’s developer mailing list, we’d like to provide some details on what’s coming in Flink 1.4.0 as well as a preview of what the Flink community will save for 1.5.0.
 Both releases include ambitious features that we believe will move Flink to an entirely new level in terms of the types of problems it can solve and applications it can support. The community deserves lots of credit for its hard work over the past few months, and we’re excited to see these features in the hands of users.
 This post will describe how the community plans to get there and the rationale behind the approach.
 Coming soon: Major Changes to Flink’s Runtime # There are 3 significant improvements to the Apache Flink engine that the community has nearly completed and that will have a meaningful impact on Flink’s operability and performance.
@@ -4734,7 +4744,7 @@
 A significantly improved dependency structure, removing many of Flink’s dependencies and subtle runtime conflicts. This increases overall stability and removes friction when embedding Flink or calling Flink &ldquo;library style&rdquo;. Reversed class loading for dynamically-loaded user code, allowing for different dependencies than those included in the core framework. An Apache Kafka 0.11 exactly-once producer, making it possible to build end-to-end exactly once applications with Flink and Kafka. Streaming SQL JOIN based on processing time and event time, which gives users the full advantage of Flink’s time handling while using a SQL JOIN. Table API / Streaming SQL Source and Sink Additions, including a Kafka 0.11 source and JDBC sink. Hadoop-free Flink, meaning that users who don’t rely on any Hadoop components (such as YARN or HDFS) in their Flink applications can use Flink without Hadoop for the first time. Improvements to queryable state, including a more container-friendly architecture, a more user-friendly API that hides configuration parameters, and the groundwork to be able to expose window state (the state of an in-flight window) in the future. Connector improvements and fixes for a range of connectors including Kafka, Apache Cassandra, Amazon Kinesis, and more. Improved RPC performance for faster recovery from failure The community decided it was best to get these features into a stable version of Flink as soon as possible, and the separation of what could have been a single (and very substantial) Flink 1.4 release into 1.4 and 1.5 serves that purpose.
 We’re excited by what each of these represents for Apache Flink, and we’d like to extend our thanks to the Flink community for all of their hard work.
 If you’d like to follow along with release discussions, please subscribe to the dev@ mailing list.
-`}),e.add({id:241,href:"/2017/08/05/apache-flink-1.3.2-released/",title:"Apache Flink 1.3.2 Released",section:"Flink Blog",content:`The Apache Flink community released the second bugfix version of the Apache Flink 1.3 series.
+`}),e.add({id:242,href:"/2017/08/05/apache-flink-1.3.2-released/",title:"Apache Flink 1.3.2 Released",section:"Flink Blog",content:`The Apache Flink community released the second bugfix version of the Apache Flink 1.3 series.
 This release includes more than 60 fixes and minor improvements for Flink 1.3.1. The list below includes a detailed list of all fixes.
 We highly recommend all users to upgrade to Flink 1.3.2.
 Important Notice: A user reported a bug in the FlinkKafkaConsumer (FLINK-7143) that is causing incorrect partition assignment in large Kafka deployments in the presence of inconsistent broker metadata. In that case multiple parallel instances of the FlinkKafkaConsumer may read from the same topic partition, leading to data duplication. In Flink 1.3.2 this bug is fixed but incorrect assignments from Flink 1.3.0 and 1.3.1 cannot be automatically fixed by upgrading to Flink 1.3.2 via a savepoint because the upgraded version would resume the wrong partition assignment from the savepoint. If you believe you are affected by this bug (seeing messages from some partitions duplicated) please refer to the JIRA issue for an upgrade path that works around that.
@@ -4743,7 +4753,7 @@
 The default Kafka version for Flink Kafka Consumer 0.10 was bumped from 0.10.0.1 to 0.10.2.1. Some default values for configurations of AWS API call behaviors in the Flink Kinesis Consumer were adapted for better default consumption performance: 1) SHARD_GETRECORDS_MAX default changed to 10,000, and 2) SHARD_GETRECORDS_INTERVAL_MILLIS default changed to 200ms. Updated Maven dependencies:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.3.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.10&lt;/artifactId&gt; &lt;version&gt;1.3.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.10&lt;/artifactId&gt; &lt;version&gt;1.3.2&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 List of resolved issues:
-Sub-task [FLINK-6665] - Pass a ScheduledExecutorService to the RestartStrategy [FLINK-6667] - Pass a callback type to the RestartStrategy, rather than the full ExecutionGraph [FLINK-6680] - App &amp; Flink migration guide: updates for the 1.3 release Bug [FLINK-5488] - yarnClient should be closed in AbstractYarnClusterDescriptor for error conditions [FLINK-6376] - when deploy flink cluster on the yarn, it is lack of hdfs delegation token. [FLINK-6541] - Jar upload directory not created [FLINK-6654] - missing maven dependency on &quot;flink-shaded-hadoop2-uber&quot; in flink-dist [FLINK-6655] - Misleading error message when HistoryServer path is empty [FLINK-6742] - Improve error message when savepoint migration fails due to task removal [FLINK-6774] - build-helper-maven-plugin version not set [FLINK-6806] - rocksdb is not listed as state backend in doc [FLINK-6843] - ClientConnectionTest fails on travis [FLINK-6867] - Elasticsearch 1.x ITCase still instable due to embedded node instability [FLINK-6918] - Failing tests: ChainLengthDecreaseTest and ChainLengthIncreaseTest [FLINK-6945] - TaskCancelAsyncProducerConsumerITCase.testCancelAsyncProducerAndConsumer instable test case [FLINK-6964] - Fix recovery for incremental checkpoints in StandaloneCompletedCheckpointStore [FLINK-6965] - Avro is missing snappy dependency [FLINK-6987] - TextInputFormatTest fails when run in path containing spaces [FLINK-6996] - FlinkKafkaProducer010 doesn&#39;t guarantee at-least-once semantic [FLINK-7005] - Optimization steps are missing for nested registered tables [FLINK-7011] - Instable Kafka testStartFromKafkaCommitOffsets failures on Travis [FLINK-7025] - Using NullByteKeySelector for Unbounded ProcTime NonPartitioned Over [FLINK-7034] - GraphiteReporter cannot recover from lost connection [FLINK-7038] - Several misused &quot;KeyedDataStream&quot; term in docs and Javadocs [FLINK-7041] - Deserialize StateBackend from JobCheckpointingSettings with user classloader [FLINK-7132] - Fix 
BulkIteration parallelism [FLINK-7133] - Fix Elasticsearch version interference [FLINK-7137] - Flink table API defaults top level fields as nullable and all nested fields within CompositeType as non-nullable [FLINK-7143] - Partition assignment for Kafka consumer is not stable [FLINK-7154] - Missing call to build CsvTableSource example [FLINK-7158] - Wrong test jar dependency in flink-clients [FLINK-7177] - DataSetAggregateWithNullValuesRule fails creating null literal for non-nullable type [FLINK-7178] - Datadog Metric Reporter Jar is Lacking Dependencies [FLINK-7180] - CoGroupStream perform checkpoint failed [FLINK-7195] - FlinkKafkaConsumer should not respect fetched partitions to filter restored partition states [FLINK-7216] - ExecutionGraph can perform concurrent global restarts to scheduling [FLINK-7225] - Cutoff exception message in StateDescriptor [FLINK-7226] - REST responses contain invalid content-encoding header [FLINK-7231] - SlotSharingGroups are not always released in time for new restarts [FLINK-7234] - Fix CombineHint documentation [FLINK-7241] - Fix YARN high availability documentation [FLINK-7255] - ListStateDescriptor example uses wrong constructor [FLINK-7258] - IllegalArgumentException in Netty bootstrap with large memory state segment size [FLINK-7266] - Don&#39;t attempt to delete parent directory on S3 [FLINK-7268] - Zookeeper Checkpoint Store interacting with Incremental State Handles can lead to loss of handles [FLINK-7281] - Fix various issues in (Maven) release infrastructure Improvement [FLINK-6365] - Adapt default values of the Kinesis connector [FLINK-6575] - Disable all tests on Windows that use HDFS [FLINK-6682] - Improve error message in case parallelism exceeds maxParallelism [FLINK-6789] - Remove duplicated test utility reducer in optimizer [FLINK-6874] - Static and transient fields ignored for POJOs [FLINK-6898] - Limit size of operator component in metric name [FLINK-6937] - Fix link markdown in Production Readiness Checklist 
doc [FLINK-6940] - Clarify the effect of configuring per-job state backend [FLINK-6998] - Kafka connector needs to expose metrics for failed/successful offset commits in the Kafka Consumer callback [FLINK-7004] - Switch to Travis Trusty image [FLINK-7032] - Intellij is constantly changing language level of sub projects back to 1.6 [FLINK-7069] - Catch exceptions for each reporter separately [FLINK-7149] - Add checkpoint ID to &#39;sendValues()&#39; in GenericWriteAheadSink [FLINK-7164] - Extend integration tests for (externalised) checkpoints, checkpoint store [FLINK-7174] - Bump dependency of Kafka 0.10.x to the latest one [FLINK-7211] - Exclude Gelly javadoc jar from release [FLINK-7224] - Incorrect Javadoc description in all Kafka consumer versions [FLINK-7228] - Harden HistoryServerStaticFileHandlerTest [FLINK-7233] - TaskManagerHeapSizeCalculationJavaBashTest failed on Travis [FLINK-7287] - test instability in Kafka010ITCase.testCommitOffsetsToKafka [FLINK-7290] - Make release scripts modular `}),e.add({id:242,href:"/2017/07/04/a-deep-dive-into-rescalable-state-in-apache-flink/",title:"A Deep Dive into Rescalable State in Apache Flink",section:"Flink Blog",content:`Apache Flink 1.2.0, released in February 2017, introduced support for rescalable state. This post provides a detailed overview of stateful stream processing and rescalable state in Flink. An Intro to Stateful Stream Processing # At a high level, we can consider state in stream processing as memory in operators that remembers information about past input and can be used to influence the processing of future input.
+Sub-task [FLINK-6665] - Pass a ScheduledExecutorService to the RestartStrategy [FLINK-6667] - Pass a callback type to the RestartStrategy, rather than the full ExecutionGraph [FLINK-6680] - App &amp; Flink migration guide: updates for the 1.3 release Bug [FLINK-5488] - yarnClient should be closed in AbstractYarnClusterDescriptor for error conditions [FLINK-6376] - when deploy flink cluster on the yarn, it is lack of hdfs delegation token. [FLINK-6541] - Jar upload directory not created [FLINK-6654] - missing maven dependency on &quot;flink-shaded-hadoop2-uber&quot; in flink-dist [FLINK-6655] - Misleading error message when HistoryServer path is empty [FLINK-6742] - Improve error message when savepoint migration fails due to task removal [FLINK-6774] - build-helper-maven-plugin version not set [FLINK-6806] - rocksdb is not listed as state backend in doc [FLINK-6843] - ClientConnectionTest fails on travis [FLINK-6867] - Elasticsearch 1.x ITCase still instable due to embedded node instability [FLINK-6918] - Failing tests: ChainLengthDecreaseTest and ChainLengthIncreaseTest [FLINK-6945] - TaskCancelAsyncProducerConsumerITCase.testCancelAsyncProducerAndConsumer instable test case [FLINK-6964] - Fix recovery for incremental checkpoints in StandaloneCompletedCheckpointStore [FLINK-6965] - Avro is missing snappy dependency [FLINK-6987] - TextInputFormatTest fails when run in path containing spaces [FLINK-6996] - FlinkKafkaProducer010 doesn&#39;t guarantee at-least-once semantic [FLINK-7005] - Optimization steps are missing for nested registered tables [FLINK-7011] - Instable Kafka testStartFromKafkaCommitOffsets failures on Travis [FLINK-7025] - Using NullByteKeySelector for Unbounded ProcTime NonPartitioned Over [FLINK-7034] - GraphiteReporter cannot recover from lost connection [FLINK-7038] - Several misused &quot;KeyedDataStream&quot; term in docs and Javadocs [FLINK-7041] - Deserialize StateBackend from JobCheckpointingSettings with user classloader [FLINK-7132] - Fix 
BulkIteration parallelism [FLINK-7133] - Fix Elasticsearch version interference [FLINK-7137] - Flink table API defaults top level fields as nullable and all nested fields within CompositeType as non-nullable [FLINK-7143] - Partition assignment for Kafka consumer is not stable [FLINK-7154] - Missing call to build CsvTableSource example [FLINK-7158] - Wrong test jar dependency in flink-clients [FLINK-7177] - DataSetAggregateWithNullValuesRule fails creating null literal for non-nullable type [FLINK-7178] - Datadog Metric Reporter Jar is Lacking Dependencies [FLINK-7180] - CoGroupStream perform checkpoint failed [FLINK-7195] - FlinkKafkaConsumer should not respect fetched partitions to filter restored partition states [FLINK-7216] - ExecutionGraph can perform concurrent global restarts to scheduling [FLINK-7225] - Cutoff exception message in StateDescriptor [FLINK-7226] - REST responses contain invalid content-encoding header [FLINK-7231] - SlotSharingGroups are not always released in time for new restarts [FLINK-7234] - Fix CombineHint documentation [FLINK-7241] - Fix YARN high availability documentation [FLINK-7255] - ListStateDescriptor example uses wrong constructor [FLINK-7258] - IllegalArgumentException in Netty bootstrap with large memory state segment size [FLINK-7266] - Don&#39;t attempt to delete parent directory on S3 [FLINK-7268] - Zookeeper Checkpoint Store interacting with Incremental State Handles can lead to loss of handles [FLINK-7281] - Fix various issues in (Maven) release infrastructure Improvement [FLINK-6365] - Adapt default values of the Kinesis connector [FLINK-6575] - Disable all tests on Windows that use HDFS [FLINK-6682] - Improve error message in case parallelism exceeds maxParallelism [FLINK-6789] - Remove duplicated test utility reducer in optimizer [FLINK-6874] - Static and transient fields ignored for POJOs [FLINK-6898] - Limit size of operator component in metric name [FLINK-6937] - Fix link markdown in Production Readiness Checklist 
doc [FLINK-6940] - Clarify the effect of configuring per-job state backend [FLINK-6998] - Kafka connector needs to expose metrics for failed/successful offset commits in the Kafka Consumer callback [FLINK-7004] - Switch to Travis Trusty image [FLINK-7032] - Intellij is constantly changing language level of sub projects back to 1.6 [FLINK-7069] - Catch exceptions for each reporter separately [FLINK-7149] - Add checkpoint ID to &#39;sendValues()&#39; in GenericWriteAheadSink [FLINK-7164] - Extend integration tests for (externalised) checkpoints, checkpoint store [FLINK-7174] - Bump dependency of Kafka 0.10.x to the latest one [FLINK-7211] - Exclude Gelly javadoc jar from release [FLINK-7224] - Incorrect Javadoc description in all Kafka consumer versions [FLINK-7228] - Harden HistoryServerStaticFileHandlerTest [FLINK-7233] - TaskManagerHeapSizeCalculationJavaBashTest failed on Travis [FLINK-7287] - test instability in Kafka010ITCase.testCommitOffsetsToKafka [FLINK-7290] - Make release scripts modular `}),e.add({id:243,href:"/2017/07/04/a-deep-dive-into-rescalable-state-in-apache-flink/",title:"A Deep Dive into Rescalable State in Apache Flink",section:"Flink Blog",content:`Apache Flink 1.2.0, released in February 2017, introduced support for rescalable state. This post provides a detailed overview of stateful stream processing and rescalable state in Flink. An Intro to Stateful Stream Processing # At a high level, we can consider state in stream processing as memory in operators that remembers information about past input and can be used to influence the processing of future input.
 In contrast, operators in stateless stream processing only consider their current inputs, without further context and knowledge about the past. A simple example to illustrate this difference: let us consider a source stream that emits events with schema e = {event_id:int, event_value:int}. Our goal is, for each event, to extract and output the event_value. We can easily achieve this with a simple source-map-sink pipeline, where the map function extracts the event_value from the event and emits it downstream to an outputting sink. This is an instance of stateless stream processing.
 But what if we want to modify our job to output the event_value only if it is larger than the value from the previous event? In this case, our map function obviously needs some way to remember the event_value from a past event — and so this is an instance of stateful stream processing.
 This example should demonstrate that state is a fundamental, enabling concept in stream processing that is required for a majority of interesting use cases.
@@ -4793,7 +4803,7 @@
 …for Flink 1.4.0 and beyond.
 If you’d like to learn more, we recommend starting with the Apache Flink documentation.
 This is an excerpt from a post that originally appeared on the data Artisans blog. If you&rsquo;d like to read the original post in its entirety, you can find it here (external link).
-`}),e.add({id:243,href:"/2017/06/23/apache-flink-1.3.1-released/",title:"Apache Flink 1.3.1 Released",section:"Flink Blog",content:"The Apache Flink community released the first bugfix version of the Apache Flink 1.3 series.\nThis release includes 50 fixes and minor improvements for Flink 1.3.0. The list below includes a detailed list of all fixes.\nWe highly recommend all users to upgrade to Flink 1.3.1.\n&lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.3.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.10&lt;/artifactId&gt; &lt;version&gt;1.3.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.10&lt;/artifactId&gt; &lt;version&gt;1.3.1&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.\nBug [FLINK-6492] - Unclosed DataOutputViewStream in GenericArraySerializerConfigSnapshot#write() [FLINK-6602] - Table source with defined time attributes allows empty string [FLINK-6652] - Problem with DelimitedInputFormat [FLINK-6659] - RocksDBMergeIteratorTest, SavepointITCase leave temporary directories behind [FLINK-6669] - [Build] Scala style check errror on Windows [FLINK-6685] - SafetyNetCloseableRegistry is closed prematurely in Task::triggerCheckpointBarrier [FLINK-6772] - Incorrect ordering of matched state events in Flink CEP [FLINK-6775] - StateDescriptor cannot be shared by multiple subtasks [FLINK-6780] - ExternalTableSource should add time attributes in the row type [FLINK-6783] - Wrongly extracted TypeInformations for WindowedStream::aggregate [FLINK-6797] - building docs fails with bundler 1.15 [FLINK-6801] - PojoSerializerConfigSnapshot cannot deal with missing Pojo fields [FLINK-6804] - Inconsistent state migration behaviour between different state backends 
[FLINK-6807] - Elasticsearch 5 connector artifact not published to maven [FLINK-6808] - Stream join fails when checkpointing is enabled [FLINK-6809] - side outputs documentation: wrong variable name in java example code [FLINK-6812] - Elasticsearch 5 release artifacts not published to Maven central [FLINK-6815] - Javadocs don&#39;t work anymore in Flink 1.4-SNAPSHOT [FLINK-6816] - Fix wrong usage of Scala string interpolation in Table API [FLINK-6833] - Race condition: Asynchronous checkpointing task can fail completed StreamTask [FLINK-6844] - TraversableSerializer should implement compatibility methods [FLINK-6848] - Extend the managed state docs with a Scala example [FLINK-6853] - Migrating from Flink 1.1 fails for FlinkCEP [FLINK-6869] - Scala serializers do not have the serialVersionUID specified [FLINK-6875] - Remote DataSet API job submission timing out [FLINK-6881] - Creating a table from a POJO and defining a time attribute fails [FLINK-6883] - Serializer for collection of Scala case classes are generated with different anonymous class names in 1.3 [FLINK-6886] - Fix Timestamp field can not be selected in event time case when toDataStream[T], `T` not a `Row` Type. 
[FLINK-6896] - Creating a table from a POJO and use table sink to output fail [FLINK-6899] - Wrong state array size in NestedMapsStateTable [FLINK-6914] - TrySerializer#ensureCompatibility causes StackOverflowException [FLINK-6915] - EnumValueSerializer broken [FLINK-6921] - EnumValueSerializer cannot properly handle appended enum values [FLINK-6922] - Enum(Value)SerializerConfigSnapshot uses Java serialization to store enum values [FLINK-6930] - Selecting window start / end on row-based Tumble/Slide window causes NPE [FLINK-6932] - Update the inaccessible Dataflow Model paper link [FLINK-6941] - Selecting window start / end on over window causes field not resolve exception [FLINK-6948] - EnumValueSerializer cannot handle removed enum values Improvement [FLINK-5354] - Split up Table API documentation into multiple pages [FLINK-6038] - Add deep links to Apache Bahir Flink streaming connector documentations [FLINK-6796] - Allow setting the user code class loader for AbstractStreamOperatorTestHarness [FLINK-6803] - Add test for PojoSerializer when Pojo changes [FLINK-6859] - StateCleaningCountTrigger should not delete timer [FLINK-6929] - Add documentation for Table API OVER windows [FLINK-6952] - Add link to Javadocs [FLINK-6748] - Table API / SQL Docs: Table API Page Test [FLINK-6830] - Add ITTests for savepoint migration from 1.3 [FLINK-6320] - Flakey JobManagerHAJobGraphRecoveryITCase [FLINK-6744] - Flaky ExecutionGraphSchedulingTest [FLINK-6913] - Instable StatefulJobSavepointMigrationITCase.testRestoreSavepoint "}),e.add({id:244,href:"/2017/06/01/apache-flink-1.3.0-release-announcement/",title:"Apache Flink 1.3.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is pleased to announce the 1.3.0 release. Over the past 4 months, the Flink community has been working hard to resolve more than 680 issues. See the complete changelog for more detail.
+`}),e.add({id:244,href:"/2017/06/23/apache-flink-1.3.1-released/",title:"Apache Flink 1.3.1 Released",section:"Flink Blog",content:"The Apache Flink community released the first bugfix version of the Apache Flink 1.3 series.\nThis release includes 50 fixes and minor improvements for Flink 1.3.0. The list below includes a detailed list of all fixes.\nWe highly recommend all users to upgrade to Flink 1.3.1.\n&lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.3.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.10&lt;/artifactId&gt; &lt;version&gt;1.3.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.10&lt;/artifactId&gt; &lt;version&gt;1.3.1&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.\nBug [FLINK-6492] - Unclosed DataOutputViewStream in GenericArraySerializerConfigSnapshot#write() [FLINK-6602] - Table source with defined time attributes allows empty string [FLINK-6652] - Problem with DelimitedInputFormat [FLINK-6659] - RocksDBMergeIteratorTest, SavepointITCase leave temporary directories behind [FLINK-6669] - [Build] Scala style check errror on Windows [FLINK-6685] - SafetyNetCloseableRegistry is closed prematurely in Task::triggerCheckpointBarrier [FLINK-6772] - Incorrect ordering of matched state events in Flink CEP [FLINK-6775] - StateDescriptor cannot be shared by multiple subtasks [FLINK-6780] - ExternalTableSource should add time attributes in the row type [FLINK-6783] - Wrongly extracted TypeInformations for WindowedStream::aggregate [FLINK-6797] - building docs fails with bundler 1.15 [FLINK-6801] - PojoSerializerConfigSnapshot cannot deal with missing Pojo fields [FLINK-6804] - Inconsistent state migration behaviour between different state backends 
[FLINK-6807] - Elasticsearch 5 connector artifact not published to maven [FLINK-6808] - Stream join fails when checkpointing is enabled [FLINK-6809] - side outputs documentation: wrong variable name in java example code [FLINK-6812] - Elasticsearch 5 release artifacts not published to Maven central [FLINK-6815] - Javadocs don&#39;t work anymore in Flink 1.4-SNAPSHOT [FLINK-6816] - Fix wrong usage of Scala string interpolation in Table API [FLINK-6833] - Race condition: Asynchronous checkpointing task can fail completed StreamTask [FLINK-6844] - TraversableSerializer should implement compatibility methods [FLINK-6848] - Extend the managed state docs with a Scala example [FLINK-6853] - Migrating from Flink 1.1 fails for FlinkCEP [FLINK-6869] - Scala serializers do not have the serialVersionUID specified [FLINK-6875] - Remote DataSet API job submission timing out [FLINK-6881] - Creating a table from a POJO and defining a time attribute fails [FLINK-6883] - Serializer for collection of Scala case classes are generated with different anonymous class names in 1.3 [FLINK-6886] - Fix Timestamp field can not be selected in event time case when toDataStream[T], `T` not a `Row` Type. 
[FLINK-6896] - Creating a table from a POJO and use table sink to output fail [FLINK-6899] - Wrong state array size in NestedMapsStateTable [FLINK-6914] - TrySerializer#ensureCompatibility causes StackOverflowException [FLINK-6915] - EnumValueSerializer broken [FLINK-6921] - EnumValueSerializer cannot properly handle appended enum values [FLINK-6922] - Enum(Value)SerializerConfigSnapshot uses Java serialization to store enum values [FLINK-6930] - Selecting window start / end on row-based Tumble/Slide window causes NPE [FLINK-6932] - Update the inaccessible Dataflow Model paper link [FLINK-6941] - Selecting window start / end on over window causes field not resolve exception [FLINK-6948] - EnumValueSerializer cannot handle removed enum values Improvement [FLINK-5354] - Split up Table API documentation into multiple pages [FLINK-6038] - Add deep links to Apache Bahir Flink streaming connector documentations [FLINK-6796] - Allow setting the user code class loader for AbstractStreamOperatorTestHarness [FLINK-6803] - Add test for PojoSerializer when Pojo changes [FLINK-6859] - StateCleaningCountTrigger should not delete timer [FLINK-6929] - Add documentation for Table API OVER windows [FLINK-6952] - Add link to Javadocs [FLINK-6748] - Table API / SQL Docs: Table API Page Test [FLINK-6830] - Add ITTests for savepoint migration from 1.3 [FLINK-6320] - Flakey JobManagerHAJobGraphRecoveryITCase [FLINK-6744] - Flaky ExecutionGraphSchedulingTest [FLINK-6913] - Instable StatefulJobSavepointMigrationITCase.testRestoreSavepoint "}),e.add({id:245,href:"/2017/06/01/apache-flink-1.3.0-release-announcement/",title:"Apache Flink 1.3.0 Release Announcement",section:"Flink Blog",content:`The Apache Flink community is pleased to announce the 1.3.0 release. Over the past 4 months, the Flink community has been working hard to resolve more than 680 issues. See the complete changelog for more detail.
 This is the fourth major release in the 1.x.y series. It is API compatible with the other 1.x.y releases for APIs annotated with the @Public annotation.
 Users can expect Flink releases now in a 4 month cycle. At the beginning of the 1.3 release cycle, the community decided to follow a strict time-based release model.
 We encourage everyone to download the release and check out the documentation. Feedback through the Flink mailing lists is, as always, gladly encouraged!
@@ -4834,7 +4844,7 @@
 Support for iterative conditions (FLINK-6197)
 Gelly Library # Unified driver for running Gelly examples FLINK-4949). PageRank algorithm for directed graphs (FLINK-4896). Add Circulant and Echo graph generators (FLINK-6393). Known Issues # There are two known issues in Flink 1.3.0. Both will be addressed in the 1.3.1 release. FLINK-6783: Wrongly extracted TypeInformations for WindowedStream::aggregate FLINK-6775: StateDescriptor cannot be shared by multiple subtasks List of Contributors # According to git shortlog, the following 103 people contributed to the 1.3.0 release. Thank you to all contributors!
 Addison Higham, Alexey Diomin, Aljoscha Krettek, Andrea Sella, Andrey Melentyev, Anton Mushin, barcahead, biao.liub, Bowen Li, Chen Qin, Chico Sokol, David Anderson, Dawid Wysakowicz, DmytroShkvyra, Fabian Hueske, Fabian Wollert, fengyelei, Flavio Pompermaier, FlorianFan, Fokko Driesprong, Geoffrey Mon, godfreyhe, gosubpl, Greg Hogan, guowei.mgw, hamstah, Haohui Mai, Hequn Cheng, hequn.chq, heytitle, hongyuhong, Jamie Grier, Jark Wu, jingzhang, Jinkui Shi, Jin Mingjian, Joerg Schad, Joshua Griffith, Jürgen Thomann, kaibozhou, Kathleen Sharp, Ken Geis, kkloudas, Kurt Young, lincoln-lil, lingjinjiang, liuyuzhong7, Lorenz Buehmann, manuzhang, Marc Tremblay, Mauro Cortellazzi, Max Kuklinski, mengji.fy, Mike Dias, mtunique, Nico Kruber, Omar Erminy, Patrick Lucas, paul, phoenixjiangnan, rami-alisawi, Ramkrishna, Rick Cox, Robert Metzger, Rodrigo Bonifacio, rtudoran, Seth Wiesman, Shaoxuan Wang, shijinkui, shuai.xus, Shuyi Chen, spkavuly, Stefano Bortoli, Stefan Richter, Stephan Ewen, Stephen Gran, sunjincheng121, tedyu, Till Rohrmann, tonycox, Tony Wei, twalthr, Tzu-Li (Gordon) Tai, Ufuk Celebi, Ventura Del Monte, Vijay Srinivasaraghavan, WangTaoTheTonic, wenlong.lwl, xccui, xiaogang.sxg, Xpray, zcb, zentol, zhangminglei, Zhenghua Gao, Zhijiang, Zhuoluo Yang, zjureel, Zohar Mizrahi, 士远, 槿瑜, 淘江, 金竹
-`}),e.add({id:245,href:"/2017/05/16/introducing-docker-images-for-apache-flink/",title:"Introducing Docker Images for Apache Flink",section:"Flink Blog",content:`For some time, the Apache Flink community has provided scripts to build a Docker image to run Flink. Now, starting with version 1.2.1, Flink will have a Docker image on the Docker Hub. This image is maintained by the Flink community and curated by the Docker team to ensure it meets the quality standards for container images of the Docker community.
+`}),e.add({id:246,href:"/2017/05/16/introducing-docker-images-for-apache-flink/",title:"Introducing Docker Images for Apache Flink",section:"Flink Blog",content:`For some time, the Apache Flink community has provided scripts to build a Docker image to run Flink. Now, starting with version 1.2.1, Flink will have a Docker image on the Docker Hub. This image is maintained by the Flink community and curated by the Docker team to ensure it meets the quality standards for container images of the Docker community.
 A community-maintained way to run Apache Flink on Docker and other container runtimes and orchestrators is part of the ongoing effort by the Flink community to make Flink a first-class citizen of the container world.
 If you want to use the Docker image today you can get the latest version by running:
 docker pull flink And to run a local Flink cluster with one TaskManager and the Web UI exposed on port 8081, run:
@@ -4842,12 +4852,12 @@
 While this announcement is an important milestone, it’s just the first step to help users run containerized Flink in production. There are improvements to be made in Flink itself and we will continue to improve these Docker images and for the documentation and examples surrounding them.
 This is of course a team effort, so any contribution is welcome. The docker-flink GitHub organization hosts the source files to generate the images and the documentation that is presented alongside the images on Docker Hub.
 Disclaimer: The docker images are provided as a community project by individuals on a best-effort basis. They are not official releases by the Apache Flink PMC.
-`}),e.add({id:246,href:"/2017/04/26/apache-flink-1.2.1-released/",title:"Apache Flink 1.2.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix version of the Apache Flink 1.2 series.
+`}),e.add({id:247,href:"/2017/04/26/apache-flink-1.2.1-released/",title:"Apache Flink 1.2.1 Released",section:"Flink Blog",content:`The Apache Flink community released the first bugfix version of the Apache Flink 1.2 series.
 This release includes many critical fixes for Flink 1.2.0. The list below includes a detailed list of all fixes.
 We highly recommend all users to upgrade to Flink 1.2.1.
 Please note that there are two unresolved major issues in Flink 1.2.1 and 1.2.0:
 FLINK-6353 Restoring using CheckpointedRestoring does not work from 1.2 to 1.2 FLINK-6188 Some setParallelism() methods can&rsquo;t cope with default parallelism &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.2.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.10&lt;/artifactId&gt; &lt;version&gt;1.2.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.10&lt;/artifactId&gt; &lt;version&gt;1.2.1&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
-Release Notes - Flink - Version 1.2.1 Sub-task [FLINK-5546] - java.io.tmpdir setted as project build directory in surefire plugin [FLINK-5640] - configure the explicit Unit Test file suffix [FLINK-5723] - Use &quot;Used&quot; instead of &quot;Initial&quot; to make taskmanager tag more readable [FLINK-5825] - In yarn mode, a small pic can not be loaded Bug [FLINK-4813] - Having flink-test-utils as a dependency outside Flink fails the build [FLINK-4848] - keystoreFilePath should be checked against null in SSLUtils#createSSLServerContext [FLINK-5628] - CheckpointStatsTracker implements Serializable but isn&#39;t [FLINK-5644] - Task#lastCheckpointSize metric broken [FLINK-5650] - Flink-python tests executing cost too long time [FLINK-5652] - Memory leak in AsyncDataStream [FLINK-5669] - flink-streaming-contrib DataStreamUtils.collect in local environment mode fails when offline [FLINK-5678] - User-defined TableFunctions do not support all types of parameters [FLINK-5699] - Cancel with savepoint fails with a NPE if savepoint target directory not set [FLINK-5701] - FlinkKafkaProducer should check asyncException on checkpoints [FLINK-5708] - we should remove duplicated configuration options [FLINK-5732] - Java quick start mvn command line is incorrect [FLINK-5749] - unset HADOOP_HOME and HADOOP_CONF_DIR to avoid env in build machine failing the UT and IT [FLINK-5751] - 404 in documentation [FLINK-5771] - DelimitedInputFormat does not correctly handle multi-byte delimiters [FLINK-5773] - Cannot cast scala.util.Failure to org.apache.flink.runtime.messages.Acknowledge [FLINK-5806] - TaskExecutionState toString format have wrong key [FLINK-5814] - flink-dist creates wrong symlink when not used with cleaned before [FLINK-5817] - Fix test concurrent execution failure by test dir conflicts. 
[FLINK-5828] - BlobServer create cache dir has concurrency safety problem [FLINK-5885] - Java code snippet instead of scala in documentation [FLINK-5907] - RowCsvInputFormat bug on parsing tsv [FLINK-5934] - Scheduler in ExecutionGraph null if failure happens in ExecutionGraph.restoreLatestCheckpointedState [FLINK-5940] - ZooKeeperCompletedCheckpointStore cannot handle broken state handles [FLINK-5942] - Harden ZooKeeperStateHandleStore to deal with corrupted data [FLINK-5945] - Close function in OuterJoinOperatorBase#executeOnCollections [FLINK-5949] - Flink on YARN checks for Kerberos credentials for non-Kerberos authentication methods [FLINK-5962] - Cancel checkpoint canceller tasks in CheckpointCoordinator [FLINK-5965] - Typo on DropWizard wrappers [FLINK-5972] - Don&#39;t allow shrinking merging windows [FLINK-5985] - Flink treats every task as stateful (making topology changes impossible) [FLINK-6000] - Can not start HA cluster with start-cluster.sh [FLINK-6001] - NPE on TumblingEventTimeWindows with ContinuousEventTimeTrigger and allowedLateness [FLINK-6002] - Documentation: &#39;MacOS X&#39; under &#39;Download and Start Flink&#39; in Quickstart page is not rendered correctly [FLINK-6006] - Kafka Consumer can lose state if queried partition list is incomplete on restore [FLINK-6025] - User code ClassLoader not used when KryoSerializer fallbacks to serialization for copying [FLINK-6051] - Wrong metric scope names in documentation [FLINK-6084] - Cassandra connector does not declare all dependencies [FLINK-6133] - fix build status in README.md [FLINK-6170] - Some checkpoint metrics rely on latest stat snapshot [FLINK-6181] - Zookeeper scripts use invalid regex [FLINK-6182] - Fix possible NPE in SourceStreamTask [FLINK-6183] - TaskMetricGroup may not be cleanup when Task.run() is never called or exits early [FLINK-6184] - Buffer metrics can cause NPE [FLINK-6203] - DataSet Transformations [FLINK-6207] - Duplicate type serializers for async snapshots of 
CopyOnWriteStateTable [FLINK-6308] - Task managers are not attaching to job manager on macos Improvement [FLINK-4326] - Flink start-up scripts should optionally start services on the foreground [FLINK-5217] - Deprecated interface Checkpointed make clear suggestion [FLINK-5331] - PythonPlanBinderTest idling extremely long [FLINK-5581] - Improve Kerberos security related documentation [FLINK-5639] - Clarify License implications of RabbitMQ Connector [FLINK-5680] - Document env.ssh.opts [FLINK-5681] - Make ReaperThread for SafetyNetCloseableRegistry a singleton [FLINK-5702] - Kafka Producer docs should warn if using setLogFailuresOnly, at-least-once is compromised [FLINK-5705] - webmonitor&#39;s request/response use UTF-8 explicitly [FLINK-5713] - Protect against NPE in WindowOperator window cleanup [FLINK-5721] - Add FoldingState to State Documentation [FLINK-5800] - Make sure that the CheckpointStreamFactory is instantiated once per operator only [FLINK-5805] - improve docs for ProcessFunction [FLINK-5807] - improved wording for doc home page [FLINK-5837] - improve readability of the queryable state docs [FLINK-5876] - Mention Scala type fallacies for queryable state client serializers [FLINK-5877] - Fix Scala snippet in Async I/O API doc [FLINK-5894] - HA docs are misleading re: state backends [FLINK-5895] - Reduce logging aggressiveness of FileSystemSafetyNet [FLINK-5938] - Replace ExecutionContext by Executor in Scheduler [FLINK-6212] - Missing reference to flink-avro dependency New Feature [FLINK-6139] - Documentation for building / preparing Flink for MapR Task [FLINK-2883] - Add documentation to forbid key-modifying ReduceFunction [FLINK-3903] - Homebrew Installation `}),e.add({id:247,href:"/2017/03/30/continuous-queries-on-dynamic-tables/",title:"Continuous Queries on Dynamic Tables",section:"Flink Blog",content:` Analyzing Data Streams with SQL # More and more companies are adopting stream processing and are migrating existing batch applications to streaming 
or implementing streaming solutions for new use cases. Many of those applications focus on analyzing streaming data. The data streams that are analyzed come from a wide variety of sources such as database transactions, clicks, sensor measurements, or IoT devices.
+Release Notes - Flink - Version 1.2.1 Sub-task [FLINK-5546] - java.io.tmpdir setted as project build directory in surefire plugin [FLINK-5640] - configure the explicit Unit Test file suffix [FLINK-5723] - Use &quot;Used&quot; instead of &quot;Initial&quot; to make taskmanager tag more readable [FLINK-5825] - In yarn mode, a small pic can not be loaded Bug [FLINK-4813] - Having flink-test-utils as a dependency outside Flink fails the build [FLINK-4848] - keystoreFilePath should be checked against null in SSLUtils#createSSLServerContext [FLINK-5628] - CheckpointStatsTracker implements Serializable but isn&#39;t [FLINK-5644] - Task#lastCheckpointSize metric broken [FLINK-5650] - Flink-python tests executing cost too long time [FLINK-5652] - Memory leak in AsyncDataStream [FLINK-5669] - flink-streaming-contrib DataStreamUtils.collect in local environment mode fails when offline [FLINK-5678] - User-defined TableFunctions do not support all types of parameters [FLINK-5699] - Cancel with savepoint fails with a NPE if savepoint target directory not set [FLINK-5701] - FlinkKafkaProducer should check asyncException on checkpoints [FLINK-5708] - we should remove duplicated configuration options [FLINK-5732] - Java quick start mvn command line is incorrect [FLINK-5749] - unset HADOOP_HOME and HADOOP_CONF_DIR to avoid env in build machine failing the UT and IT [FLINK-5751] - 404 in documentation [FLINK-5771] - DelimitedInputFormat does not correctly handle multi-byte delimiters [FLINK-5773] - Cannot cast scala.util.Failure to org.apache.flink.runtime.messages.Acknowledge [FLINK-5806] - TaskExecutionState toString format have wrong key [FLINK-5814] - flink-dist creates wrong symlink when not used with cleaned before [FLINK-5817] - Fix test concurrent execution failure by test dir conflicts. 
[FLINK-5828] - BlobServer create cache dir has concurrency safety problem [FLINK-5885] - Java code snippet instead of scala in documentation [FLINK-5907] - RowCsvInputFormat bug on parsing tsv [FLINK-5934] - Scheduler in ExecutionGraph null if failure happens in ExecutionGraph.restoreLatestCheckpointedState [FLINK-5940] - ZooKeeperCompletedCheckpointStore cannot handle broken state handles [FLINK-5942] - Harden ZooKeeperStateHandleStore to deal with corrupted data [FLINK-5945] - Close function in OuterJoinOperatorBase#executeOnCollections [FLINK-5949] - Flink on YARN checks for Kerberos credentials for non-Kerberos authentication methods [FLINK-5962] - Cancel checkpoint canceller tasks in CheckpointCoordinator [FLINK-5965] - Typo on DropWizard wrappers [FLINK-5972] - Don&#39;t allow shrinking merging windows [FLINK-5985] - Flink treats every task as stateful (making topology changes impossible) [FLINK-6000] - Can not start HA cluster with start-cluster.sh [FLINK-6001] - NPE on TumblingEventTimeWindows with ContinuousEventTimeTrigger and allowedLateness [FLINK-6002] - Documentation: &#39;MacOS X&#39; under &#39;Download and Start Flink&#39; in Quickstart page is not rendered correctly [FLINK-6006] - Kafka Consumer can lose state if queried partition list is incomplete on restore [FLINK-6025] - User code ClassLoader not used when KryoSerializer fallbacks to serialization for copying [FLINK-6051] - Wrong metric scope names in documentation [FLINK-6084] - Cassandra connector does not declare all dependencies [FLINK-6133] - fix build status in README.md [FLINK-6170] - Some checkpoint metrics rely on latest stat snapshot [FLINK-6181] - Zookeeper scripts use invalid regex [FLINK-6182] - Fix possible NPE in SourceStreamTask [FLINK-6183] - TaskMetricGroup may not be cleanup when Task.run() is never called or exits early [FLINK-6184] - Buffer metrics can cause NPE [FLINK-6203] - DataSet Transformations [FLINK-6207] - Duplicate type serializers for async snapshots of 
CopyOnWriteStateTable [FLINK-6308] - Task managers are not attaching to job manager on macos Improvement [FLINK-4326] - Flink start-up scripts should optionally start services on the foreground [FLINK-5217] - Deprecated interface Checkpointed make clear suggestion [FLINK-5331] - PythonPlanBinderTest idling extremely long [FLINK-5581] - Improve Kerberos security related documentation [FLINK-5639] - Clarify License implications of RabbitMQ Connector [FLINK-5680] - Document env.ssh.opts [FLINK-5681] - Make ReaperThread for SafetyNetCloseableRegistry a singleton [FLINK-5702] - Kafka Producer docs should warn if using setLogFailuresOnly, at-least-once is compromised [FLINK-5705] - webmonitor&#39;s request/response use UTF-8 explicitly [FLINK-5713] - Protect against NPE in WindowOperator window cleanup [FLINK-5721] - Add FoldingState to State Documentation [FLINK-5800] - Make sure that the CheckpointStreamFactory is instantiated once per operator only [FLINK-5805] - improve docs for ProcessFunction [FLINK-5807] - improved wording for doc home page [FLINK-5837] - improve readability of the queryable state docs [FLINK-5876] - Mention Scala type fallacies for queryable state client serializers [FLINK-5877] - Fix Scala snippet in Async I/O API doc [FLINK-5894] - HA docs are misleading re: state backends [FLINK-5895] - Reduce logging aggressiveness of FileSystemSafetyNet [FLINK-5938] - Replace ExecutionContext by Executor in Scheduler [FLINK-6212] - Missing reference to flink-avro dependency New Feature [FLINK-6139] - Documentation for building / preparing Flink for MapR Task [FLINK-2883] - Add documentation to forbid key-modifying ReduceFunction [FLINK-3903] - Homebrew Installation `}),e.add({id:248,href:"/2017/03/30/continuous-queries-on-dynamic-tables/",title:"Continuous Queries on Dynamic Tables",section:"Flink Blog",content:` Analyzing Data Streams with SQL # More and more companies are adopting stream processing and are migrating existing batch applications to streaming 
or implementing streaming solutions for new use cases. Many of those applications focus on analyzing streaming data. The data streams that are analyzed come from a wide variety of sources such as database transactions, clicks, sensor measurements, or IoT devices.
 Apache Flink is very well suited to power streaming analytics applications because it provides support for event-time semantics, stateful exactly-once processing, and achieves high throughput and low latency at the same time. Due to these features, Flink is able to compute exact and deterministic results from high-volume input streams in near real-time while providing exactly-once semantics in case of failures.
 Flink&rsquo;s core API for stream processing, the DataStream API, is very expressive and provides primitives for many common operations. Among other features, it offers highly customizable windowing logic, different state primitives with varying performance characteristics, hooks to register and react on timers, and tooling for efficient asynchronous requests to external systems. On the other hand, many stream analytics applications follow similar patterns and do not require the level of expressiveness as provided by the DataStream API. They could be expressed in a more natural and concise way using a domain specific language. As we all know, SQL is the de-facto standard for data analytics. For streaming analytics, SQL would enable a larger pool of people to specify applications on data streams in less time. However, no open source stream processor offers decent SQL support yet.
 Why is SQL on Streams a Big Deal? # SQL is the most widely used language for data analytics for many good reasons:
@@ -4889,7 +4899,7 @@
 The answer to all these questions is simple. The current processing model is a subset of the dynamic table model. Using the terminology we introduced in this post, the current model converts a stream into a dynamic table in append mode, i.e., an infinitely growing table. Since all operators only accept insert changes and produce insert changes on their result table (i.e., emit new rows), all supported queries result in dynamic append tables, which are converted back into DataStreams using the redo model for append-only tables. Consequently, the semantics of the current model are completely covered and preserved by the new dynamic table model.
 Conclusion and Outlook # Flink&rsquo;s relational APIs are great to implement stream analytics applications in no time and used in several production settings. In this blog post we discussed the future of the Table API and SQL. This effort will make Flink and stream processing accessible to more people. Moreover, the unified semantics for querying historic and real-time data as well as the concept of querying and maintaining dynamic tables will enable and significantly ease the implementation of many exciting use cases and applications. As this post was focusing on the semantics of relational queries on streams and dynamic tables, we did not discuss the details of how a query will be executed, which includes the internal implementation of retractions, handling of late events, support for early results, and bounding space requirements. We plan to publish a follow up blog post on this topic at a later point in time.
 In recent months, many members of the Flink community have been discussing and contributing to the relational APIs. We made great progress so far. While most work has focused on processing streams in append mode, the next steps on the agenda are to work on dynamic tables to support queries that update their results. If you are excited about the idea of processing streams with SQL and would like to contribute to this effort, please give feedback, join the discussions on the mailing list, or grab a JIRA issue to work on.
-`}),e.add({id:248,href:"/2017/03/29/from-streams-to-tables-and-back-again-an-update-on-flinks-table-sql-api/",title:"From Streams to Tables and Back Again: An Update on Flink's Table & SQL API",section:"Flink Blog",content:`Stream processing can deliver a lot of value. Many organizations have recognized the benefit of managing large volumes of data in real-time, reacting quickly to trends, and providing customers with live services at scale. Streaming applications with well-defined business logic can deliver a competitive advantage.
+`}),e.add({id:249,href:"/2017/03/29/from-streams-to-tables-and-back-again-an-update-on-flinks-table-sql-api/",title:"From Streams to Tables and Back Again: An Update on Flink's Table & SQL API",section:"Flink Blog",content:`Stream processing can deliver a lot of value. Many organizations have recognized the benefit of managing large volumes of data in real-time, reacting quickly to trends, and providing customers with live services at scale. Streaming applications with well-defined business logic can deliver a competitive advantage.
 Flink&rsquo;s DataStream abstraction is a powerful API which lets you flexibly define both basic and complex streaming pipelines. Additionally, it offers low-level operations such as Async IO and ProcessFunctions. However, many users do not need such a deep level of flexibility. They need an API which quickly solves 80% of their use cases where simple tasks can be defined using little code.
 To deliver the power of stream processing to a broader set of users, the Apache Flink community is developing APIs that provide simpler abstractions and more concise syntax so that users can focus on their business logic instead of advanced streaming concepts. Along with other APIs (such as CEP for complex event processing on streams), Flink offers a relational API that aims to unify stream and batch processing: the Table &amp; SQL API, often referred to as the Table API.
 Recently, contributors working for companies such as Alibaba, Huawei, data Artisans, and more decided to further develop the Table API. Over the past year, the Table API has been rewritten entirely. Since Flink 1.1, its core has been based on Apache Calcite, which parses SQL and optimizes all relational queries. Today, the Table API can address a wide range of use cases in both batch and stream environments with unified semantics.
@@ -4917,10 +4927,10 @@
 class PropertiesExtractor extends TableFunction[Row] { def eval(prefs: String): Unit = { // split string into (key, value) pairs val pairs = prefs .split(&#34;,&#34;) .map { kv =&gt; val split = kv.split(&#34;=&#34;) (split(0), split(1)) } val color = pairs.find(\\_.\\_1 == &#34;color&#34;).map(\\_.\\_2) val size = pairs.find(\\_.\\_1 == &#34;size&#34;).map(\\_.\\_2) // emit a row if color and size are specified (color, size) match { case (Some(c), Some(s)) =&gt; collect(Row.of(c, s)) case _ =&gt; // skip } } override def getResultType = new RowTypeInfo(Types.STRING, Types.STRING) } Conclusion # There is significant interest in making streaming more accessible and easier to use. Flink’s Table API development is happening quickly, and we believe that soon, you will be able to implement large batch or streaming pipelines using purely relational APIs or even convert existing Flink jobs to table programs. The Table API is already a very useful tool since you can work around limitations and missing features at any time by switching back-and-forth between the DataSet/DataStream abstraction to the Table abstraction.
 Contributions like support of Apache Hive UDFs, external catalogs, more TableSources, additional windows, and more operators will make the Table API an even more useful tool. Particularly, the upcoming introduction of Dynamic Tables, which is worth a blog post of its own, shows that even in 2017, new relational APIs open the door to a number of possibilities.
 Try it out, or even better, join the design discussions on the mailing lists and JIRA and start contributing!
-`}),e.add({id:249,href:"/2017/03/23/apache-flink-1.1.5-released/",title:"Apache Flink 1.1.5 Released",section:"Flink Blog",content:`The Apache Flink community released the next bugfix version of the Apache Flink 1.1 series.
+`}),e.add({id:250,href:"/2017/03/23/apache-flink-1.1.5-released/",title:"Apache Flink 1.1.5 Released",section:"Flink Blog",content:`The Apache Flink community released the next bugfix version of the Apache Flink 1.1 series.
 This release includes critical fixes for HA recovery robustness, fault tolerance guarantees of the Flink Kafka Connector, as well as classloading issues with the Kryo serializer. We highly recommend all users to upgrade to Flink 1.1.5.
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.1.5&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.10&lt;/artifactId&gt; &lt;version&gt;1.1.5&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.10&lt;/artifactId&gt; &lt;version&gt;1.1.5&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
-Release Notes - Flink - Version 1.1.5 # Bug # [FLINK-5701] - FlinkKafkaProducer should check asyncException on checkpoints [FLINK-6006] - Kafka Consumer can lose state if queried partition list is incomplete on restore [FLINK-5940] - ZooKeeperCompletedCheckpointStore cannot handle broken state handles [FLINK-5942] - Harden ZooKeeperStateHandleStore to deal with corrupted data [FLINK-6025] - User code ClassLoader not used when KryoSerializer fallbacks to serialization for copying [FLINK-5945] - Close function in OuterJoinOperatorBase#executeOnCollections [FLINK-5934] - Scheduler in ExecutionGraph null if failure happens in ExecutionGraph.restoreLatestCheckpointedState [FLINK-5771] - DelimitedInputFormat does not correctly handle multi-byte delimiters [FLINK-5647] - Fix RocksDB Backend Cleanup [FLINK-2662] - CompilerException: "Bug: Plan generation for Unions picked a ship strategy between binary plan operators." [FLINK-5585] - NullPointer Exception in JobManager.updateAccumulators [FLINK-5484] - Add test for registered Kryo types [FLINK-5518] - HadoopInputFormat throws NPE when close() is called before open() Improvement # [FLINK-5575] - in old releases, warn users and guide them to the latest stable docs [FLINK-5639] - Clarify License implications of RabbitMQ Connector [FLINK-5466] - Make production environment default in gulpfile `}),e.add({id:250,href:"/2017/02/06/announcing-apache-flink-1.2.0/",title:"Announcing Apache Flink 1.2.0",section:"Flink Blog",content:`The Apache Flink community is pleased to announce the 1.2.0 release. Over the past months, the Flink community has been working hard to resolve 650 issues. See the complete changelog for more detail.
+Release Notes - Flink - Version 1.1.5 # Bug # [FLINK-5701] - FlinkKafkaProducer should check asyncException on checkpoints [FLINK-6006] - Kafka Consumer can lose state if queried partition list is incomplete on restore [FLINK-5940] - ZooKeeperCompletedCheckpointStore cannot handle broken state handles [FLINK-5942] - Harden ZooKeeperStateHandleStore to deal with corrupted data [FLINK-6025] - User code ClassLoader not used when KryoSerializer fallbacks to serialization for copying [FLINK-5945] - Close function in OuterJoinOperatorBase#executeOnCollections [FLINK-5934] - Scheduler in ExecutionGraph null if failure happens in ExecutionGraph.restoreLatestCheckpointedState [FLINK-5771] - DelimitedInputFormat does not correctly handle multi-byte delimiters [FLINK-5647] - Fix RocksDB Backend Cleanup [FLINK-2662] - CompilerException: "Bug: Plan generation for Unions picked a ship strategy between binary plan operators." [FLINK-5585] - NullPointer Exception in JobManager.updateAccumulators [FLINK-5484] - Add test for registered Kryo types [FLINK-5518] - HadoopInputFormat throws NPE when close() is called before open() Improvement # [FLINK-5575] - in old releases, warn users and guide them to the latest stable docs [FLINK-5639] - Clarify License implications of RabbitMQ Connector [FLINK-5466] - Make production environment default in gulpfile `}),e.add({id:251,href:"/2017/02/06/announcing-apache-flink-1.2.0/",title:"Announcing Apache Flink 1.2.0",section:"Flink Blog",content:`The Apache Flink community is pleased to announce the 1.2.0 release. Over the past months, the Flink community has been working hard to resolve 650 issues. See the complete changelog for more detail.
 This is the third major release in the 1.x.y series. It is API compatible with the other 1.x.y releases for APIs annotated with the @Public annotation.
 We encourage everyone to download the release and check out the documentation. Feedback through the Flink mailing lists is, as always, gladly encouraged!
 You can find the binaries on the updated Downloads page. Some highlights of the release are listed below.
@@ -4953,11 +4963,11 @@
 Kafka 0.10 support: Flink 1.2 now provides a connector for Apache Kafka 0.10.0.x, including support for consuming and producing messages with a timestamp using Flink’s internal event time (Kafka Connector Documentation)
 Evictor Semantics: Flink 1.2 ships with more expressive evictor semantics that allow the programmer to evict elements form a window both before and after the application of the window function, and to remove elements arbitrarily (Evictor Semantics Documentation)
 List of Contributors # According to git shortlog, the following 122 people contributed to the 1.2.0 release. Thank you to all contributors!
-Abhishek R. Singh Ahmad Ragab Aleksandr Chermenin Alexander Pivovarov Alexander Shoshin Alexey Diomin Aljoscha Krettek Andrey Melentyev Anton Mushin Bob Thorman Boris Osipov Bram Vogelaar Bruno Aranda David Anderson Dominik Evgeny_Kincharov Fabian Hueske Fokko Driesprong Gabor Gevay George Gordon Tai Greg Hogan Gyula Fora Haohui Mai Holger Frydrych HungUnicorn Ismaël Mejía Ivan Mushketyk Jakub Havlik Jark Wu Jendrik Poloczek Jincheng Sun Josh Joshi Keiji Yoshida Kirill Morozov Kurt Young Liwei Lin Lorenz Buehmann Maciek Próchniak Makman2 Markus Müller Martin Junghanns Márton Balassi Max Kuklinski Maximilian Michels Milosz Tanski Nagarjun Neelesh Srinivas Salian Neil Derraugh Nick Chadwick Nico Kruber Niels Basjes Pattarawat Chormai Piotr Godek Raghav Ramkrishna Robert Metzger Rohit Agarwal Roman Maier Sachin Sachin Goel Scott Kidder Shannon Carey Stefan Richter Steffen Hausmann Stephan Epping Stephan Ewen Sunny T Suri Theodore Vasiloudis Till Rohrmann Tony Wei Tzu-Li (Gordon) Tai Ufuk Celebi Vijay Srinivasaraghavan Vishnu Viswanath WangTaoTheTonic William-Sang Yassine Marzougui anton solovev beyond1920 biao.liub chobeat danielblazevski f7753 fengyelei fengyelei 00406569 gallenvara gaolun.gl godfreyhe heytitle hzyuemeng1 iteblog kl0u larsbachmann lincoln-lil manuzhang medale miaoever mtunique radekg renkai sergey_sokur shijinkui shuai.xus smarthi swapnil-chougule tedyu tibor.moger tonycox twalthr vasia wenlong.lwl wrighe3 xiaogang.sxg yushi.wxg yuzhongliu zentol zhuhaifengleon 淘江 魏偉哲 `}),e.add({id:251,href:"/2016/12/21/apache-flink-1.1.4-released/",title:"Apache Flink 1.1.4 Released",section:"Flink Blog",content:`The Apache Flink community released the next bugfix version of the Apache Flink 1.1 series.
+Abhishek R. Singh Ahmad Ragab Aleksandr Chermenin Alexander Pivovarov Alexander Shoshin Alexey Diomin Aljoscha Krettek Andrey Melentyev Anton Mushin Bob Thorman Boris Osipov Bram Vogelaar Bruno Aranda David Anderson Dominik Evgeny_Kincharov Fabian Hueske Fokko Driesprong Gabor Gevay George Gordon Tai Greg Hogan Gyula Fora Haohui Mai Holger Frydrych HungUnicorn Ismaël Mejía Ivan Mushketyk Jakub Havlik Jark Wu Jendrik Poloczek Jincheng Sun Josh Joshi Keiji Yoshida Kirill Morozov Kurt Young Liwei Lin Lorenz Buehmann Maciek Próchniak Makman2 Markus Müller Martin Junghanns Márton Balassi Max Kuklinski Maximilian Michels Milosz Tanski Nagarjun Neelesh Srinivas Salian Neil Derraugh Nick Chadwick Nico Kruber Niels Basjes Pattarawat Chormai Piotr Godek Raghav Ramkrishna Robert Metzger Rohit Agarwal Roman Maier Sachin Sachin Goel Scott Kidder Shannon Carey Stefan Richter Steffen Hausmann Stephan Epping Stephan Ewen Sunny T Suri Theodore Vasiloudis Till Rohrmann Tony Wei Tzu-Li (Gordon) Tai Ufuk Celebi Vijay Srinivasaraghavan Vishnu Viswanath WangTaoTheTonic William-Sang Yassine Marzougui anton solovev beyond1920 biao.liub chobeat danielblazevski f7753 fengyelei fengyelei 00406569 gallenvara gaolun.gl godfreyhe heytitle hzyuemeng1 iteblog kl0u larsbachmann lincoln-lil manuzhang medale miaoever mtunique radekg renkai sergey_sokur shijinkui shuai.xus smarthi swapnil-chougule tedyu tibor.moger tonycox twalthr vasia wenlong.lwl wrighe3 xiaogang.sxg yushi.wxg yuzhongliu zentol zhuhaifengleon 淘江 魏偉哲 `}),e.add({id:252,href:"/2016/12/21/apache-flink-1.1.4-released/",title:"Apache Flink 1.1.4 Released",section:"Flink Blog",content:`The Apache Flink community released the next bugfix version of the Apache Flink 1.1 series.
 This release includes major robustness improvements for checkpoint cleanup on failures and consumption of intermediate streams. We highly recommend all users to upgrade to Flink 1.1.4.
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.1.4&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.10&lt;/artifactId&gt; &lt;version&gt;1.1.4&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.10&lt;/artifactId&gt; &lt;version&gt;1.1.4&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 Note for RocksDB Backend Users # We updated Flink&rsquo;s RocksDB dependency version from 4.5.1 to 4.11.2. Between these versions some of RocksDB&rsquo;s internal configuration defaults changed that would affect the memory footprint of running Flink with RocksDB. Therefore, we manually reset them to the previous defaults. If you want to run with the new Rocks 4.11.2 defaults, you can do this via:
-RocksDBStateBackend backend = new RocksDBStateBackend(&#34;...&#34;); // Use the new default options. Otherwise, the default for RocksDB 4.5.1 // \`PredefinedOptions.DEFAULT_ROCKS_4_5_1\` will be used. backend.setPredefinedOptions(PredefinedOptions.DEFAULT); Release Notes - Flink - Version 1.1.4 # Sub-task # [FLINK-4510] - Always create CheckpointCoordinator [FLINK-4984] - Add Cancellation Barriers to BarrierTracker and BarrierBuffer [FLINK-4985] - Report Declined/Canceled Checkpoints to Checkpoint Coordinator Bug # [FLINK-2662] - CompilerException: &quot;Bug: Plan generation for Unions picked a ship strategy between binary plan operators.&quot; [FLINK-3680] - Remove or improve (not set) text in the Job Plan UI [FLINK-3813] - YARNSessionFIFOITCase.testDetachedMode failed on Travis [FLINK-4108] - NPE in Row.productArity [FLINK-4506] - CsvOutputFormat defaults allowNullValues to false, even though doc and declaration says true [FLINK-4581] - Table API throws &quot;No suitable driver found for jdbc:calcite&quot; [FLINK-4586] - NumberSequenceIterator and Accumulator threading issue [FLINK-4619] - JobManager does not answer to client when restore from savepoint fails [FLINK-4727] - Kafka 0.9 Consumer should also checkpoint auto retrieved offsets even when no data is read [FLINK-4862] - NPE on EventTimeSessionWindows with ContinuousEventTimeTrigger [FLINK-4932] - Don&#39;t let ExecutionGraph fail when in state Restarting [FLINK-4933] - ExecutionGraph.scheduleOrUpdateConsumers can fail the ExecutionGraph [FLINK-4977] - Enum serialization does not work in all cases [FLINK-4991] - TestTask hangs in testWatchDogInterruptsTask [FLINK-4998] - ResourceManager fails when num task slots &gt; Yarn vcores [FLINK-5013] - Flink Kinesis connector doesn&#39;t work on old EMR versions [FLINK-5028] - Stream Tasks must not go through clean shutdown logic on cancellation [FLINK-5038] - Errors in the &quot;cancelTask&quot; method prevent closeables from being closed early [FLINK-5039] - 
Avro GenericRecord support is broken [FLINK-5040] - Set correct input channel types with eager scheduling [FLINK-5050] - JSON.org license is CatX [FLINK-5057] - Cancellation timeouts are picked from wrong config [FLINK-5058] - taskManagerMemory attribute set wrong value in FlinkShell [FLINK-5063] - State handles are not properly cleaned up for declined or expired checkpoints [FLINK-5073] - ZooKeeperCompleteCheckpointStore executes blocking delete operation in ZooKeeper client thread [FLINK-5075] - Kinesis consumer incorrectly determines shards as newly discovered when tested against Kinesalite [FLINK-5082] - Pull ExecutionService lifecycle management out of the JobManager [FLINK-5085] - Execute CheckpointCoodinator&#39;s state discard calls asynchronously [FLINK-5114] - PartitionState update with finished execution fails [FLINK-5142] - Resource leak in CheckpointCoordinator [FLINK-5149] - ContinuousEventTimeTrigger doesn&#39;t fire at the end of the window [FLINK-5154] - Duplicate TypeSerializer when writing RocksDB Snapshot [FLINK-5158] - Handle ZooKeeperCompletedCheckpointStore exceptions in CheckpointCoordinator [FLINK-5172] - In RocksDBStateBackend, set flink-core and flink-streaming-java to &quot;provided&quot; [FLINK-5173] - Upgrade RocksDB dependency [FLINK-5184] - Error result of compareSerialized in RowComparator class [FLINK-5193] - Recovering all jobs fails completely if a single recovery fails [FLINK-5197] - Late JobStatusChanged messages can interfere with running jobs [FLINK-5214] - Clean up checkpoint files when failing checkpoint operation on TM [FLINK-5215] - Close checkpoint streams upon cancellation [FLINK-5216] - CheckpointCoordinator&#39;s &#39;minPauseBetweenCheckpoints&#39; refers to checkpoint start rather then checkpoint completion [FLINK-5218] - Eagerly close checkpoint streams on cancellation [FLINK-5228] - LocalInputChannel re-trigger request and release deadlock [FLINK-5229] - Cleanup StreamTaskStates if a checkpoint operation of a 
subsequent operator fails [FLINK-5246] - Don&#39;t discard unknown checkpoint messages in the CheckpointCoordinator [FLINK-5248] - SavepointITCase doesn&#39;t catch savepoint restore failure [FLINK-5274] - LocalInputChannel throws NPE if partition reader is released [FLINK-5275] - InputChanelDeploymentDescriptors throws misleading Exception if producer failed/cancelled [FLINK-5276] - ExecutionVertex archiving can throw NPE with many previous attempts [FLINK-5285] - CancelCheckpointMarker flood when using at least once mode [FLINK-5326] - IllegalStateException: Bug in Netty consumer logic: reader queue got notified by partition about available data, but none was available [FLINK-5352] - Restore RocksDB 1.1.3 memory behavior Improvement # [FLINK-3347] - TaskManager (or its ActorSystem) need to restart in case they notice quarantine [FLINK-3787] - Yarn client does not report unfulfillable container constraints [FLINK-4445] - Ignore unmatched state when restoring from savepoint [FLINK-4715] - TaskManager should commit suicide after cancellation failure [FLINK-4894] - Don&#39;t block on buffer request after broadcastEvent [FLINK-4975] - Add a limit for how much data may be buffered during checkpoint alignment [FLINK-4996] - Make CrossHint @Public [FLINK-5046] - Avoid redundant serialization when creating the TaskDeploymentDescriptor [FLINK-5123] - Add description how to do proper shading to Flink docs. 
[FLINK-5169] - Make consumption of input channels fair [FLINK-5192] - Provide better log config templates [FLINK-5194] - Log heartbeats on TRACE level [FLINK-5196] - Don&#39;t log InputChannelDescriptor [FLINK-5198] - Overwrite TaskState toString [FLINK-5199] - Improve logging of submitted job graph actions in HA case [FLINK-5201] - Promote loaded config properties to INFO [FLINK-5207] - Decrease HadoopFileSystem logging [FLINK-5249] - description of datastream rescaling doesn&#39;t match the figure [FLINK-5259] - wrong execution environment in retry delays example [FLINK-5278] - Improve Task and checkpoint logging New Feature # [FLINK-4976] - Add a way to abort in flight checkpoints Task # [FLINK-4778] - Update program example in /docs/setup/cli.md due to the change in FLINK-2021 `}),e.add({id:252,href:"/2016/12/19/apache-flink-in-2016-year-in-review/",title:"Apache Flink in 2016: Year in Review",section:"Flink Blog",content:`2016 was an exciting year for the Apache Flink® community, and the release of Flink 1.0 in March marked the first time in Flink’s history that the community guaranteed API backward compatibility for all versions in a series. This step forward for Flink was followed by many new and exciting production deployments in organizations of all shapes and sizes, all around the globe.
+RocksDBStateBackend backend = new RocksDBStateBackend(&#34;...&#34;); // Use the new default options. Otherwise, the default for RocksDB 4.5.1 // \`PredefinedOptions.DEFAULT_ROCKS_4_5_1\` will be used. backend.setPredefinedOptions(PredefinedOptions.DEFAULT); Release Notes - Flink - Version 1.1.4 # Sub-task # [FLINK-4510] - Always create CheckpointCoordinator [FLINK-4984] - Add Cancellation Barriers to BarrierTracker and BarrierBuffer [FLINK-4985] - Report Declined/Canceled Checkpoints to Checkpoint Coordinator Bug # [FLINK-2662] - CompilerException: &quot;Bug: Plan generation for Unions picked a ship strategy between binary plan operators.&quot; [FLINK-3680] - Remove or improve (not set) text in the Job Plan UI [FLINK-3813] - YARNSessionFIFOITCase.testDetachedMode failed on Travis [FLINK-4108] - NPE in Row.productArity [FLINK-4506] - CsvOutputFormat defaults allowNullValues to false, even though doc and declaration says true [FLINK-4581] - Table API throws &quot;No suitable driver found for jdbc:calcite&quot; [FLINK-4586] - NumberSequenceIterator and Accumulator threading issue [FLINK-4619] - JobManager does not answer to client when restore from savepoint fails [FLINK-4727] - Kafka 0.9 Consumer should also checkpoint auto retrieved offsets even when no data is read [FLINK-4862] - NPE on EventTimeSessionWindows with ContinuousEventTimeTrigger [FLINK-4932] - Don&#39;t let ExecutionGraph fail when in state Restarting [FLINK-4933] - ExecutionGraph.scheduleOrUpdateConsumers can fail the ExecutionGraph [FLINK-4977] - Enum serialization does not work in all cases [FLINK-4991] - TestTask hangs in testWatchDogInterruptsTask [FLINK-4998] - ResourceManager fails when num task slots &gt; Yarn vcores [FLINK-5013] - Flink Kinesis connector doesn&#39;t work on old EMR versions [FLINK-5028] - Stream Tasks must not go through clean shutdown logic on cancellation [FLINK-5038] - Errors in the &quot;cancelTask&quot; method prevent closeables from being closed early [FLINK-5039] - 
Avro GenericRecord support is broken [FLINK-5040] - Set correct input channel types with eager scheduling [FLINK-5050] - JSON.org license is CatX [FLINK-5057] - Cancellation timeouts are picked from wrong config [FLINK-5058] - taskManagerMemory attribute set wrong value in FlinkShell [FLINK-5063] - State handles are not properly cleaned up for declined or expired checkpoints [FLINK-5073] - ZooKeeperCompleteCheckpointStore executes blocking delete operation in ZooKeeper client thread [FLINK-5075] - Kinesis consumer incorrectly determines shards as newly discovered when tested against Kinesalite [FLINK-5082] - Pull ExecutionService lifecycle management out of the JobManager [FLINK-5085] - Execute CheckpointCoodinator&#39;s state discard calls asynchronously [FLINK-5114] - PartitionState update with finished execution fails [FLINK-5142] - Resource leak in CheckpointCoordinator [FLINK-5149] - ContinuousEventTimeTrigger doesn&#39;t fire at the end of the window [FLINK-5154] - Duplicate TypeSerializer when writing RocksDB Snapshot [FLINK-5158] - Handle ZooKeeperCompletedCheckpointStore exceptions in CheckpointCoordinator [FLINK-5172] - In RocksDBStateBackend, set flink-core and flink-streaming-java to &quot;provided&quot; [FLINK-5173] - Upgrade RocksDB dependency [FLINK-5184] - Error result of compareSerialized in RowComparator class [FLINK-5193] - Recovering all jobs fails completely if a single recovery fails [FLINK-5197] - Late JobStatusChanged messages can interfere with running jobs [FLINK-5214] - Clean up checkpoint files when failing checkpoint operation on TM [FLINK-5215] - Close checkpoint streams upon cancellation [FLINK-5216] - CheckpointCoordinator&#39;s &#39;minPauseBetweenCheckpoints&#39; refers to checkpoint start rather then checkpoint completion [FLINK-5218] - Eagerly close checkpoint streams on cancellation [FLINK-5228] - LocalInputChannel re-trigger request and release deadlock [FLINK-5229] - Cleanup StreamTaskStates if a checkpoint operation of a 
subsequent operator fails [FLINK-5246] - Don&#39;t discard unknown checkpoint messages in the CheckpointCoordinator [FLINK-5248] - SavepointITCase doesn&#39;t catch savepoint restore failure [FLINK-5274] - LocalInputChannel throws NPE if partition reader is released [FLINK-5275] - InputChanelDeploymentDescriptors throws misleading Exception if producer failed/cancelled [FLINK-5276] - ExecutionVertex archiving can throw NPE with many previous attempts [FLINK-5285] - CancelCheckpointMarker flood when using at least once mode [FLINK-5326] - IllegalStateException: Bug in Netty consumer logic: reader queue got notified by partition about available data, but none was available [FLINK-5352] - Restore RocksDB 1.1.3 memory behavior Improvement # [FLINK-3347] - TaskManager (or its ActorSystem) need to restart in case they notice quarantine [FLINK-3787] - Yarn client does not report unfulfillable container constraints [FLINK-4445] - Ignore unmatched state when restoring from savepoint [FLINK-4715] - TaskManager should commit suicide after cancellation failure [FLINK-4894] - Don&#39;t block on buffer request after broadcastEvent [FLINK-4975] - Add a limit for how much data may be buffered during checkpoint alignment [FLINK-4996] - Make CrossHint @Public [FLINK-5046] - Avoid redundant serialization when creating the TaskDeploymentDescriptor [FLINK-5123] - Add description how to do proper shading to Flink docs. 
[FLINK-5169] - Make consumption of input channels fair [FLINK-5192] - Provide better log config templates [FLINK-5194] - Log heartbeats on TRACE level [FLINK-5196] - Don&#39;t log InputChannelDescriptor [FLINK-5198] - Overwrite TaskState toString [FLINK-5199] - Improve logging of submitted job graph actions in HA case [FLINK-5201] - Promote loaded config properties to INFO [FLINK-5207] - Decrease HadoopFileSystem logging [FLINK-5249] - description of datastream rescaling doesn&#39;t match the figure [FLINK-5259] - wrong execution environment in retry delays example [FLINK-5278] - Improve Task and checkpoint logging New Feature # [FLINK-4976] - Add a way to abort in flight checkpoints Task # [FLINK-4778] - Update program example in /docs/setup/cli.md due to the change in FLINK-2021 `}),e.add({id:253,href:"/2016/12/19/apache-flink-in-2016-year-in-review/",title:"Apache Flink in 2016: Year in Review",section:"Flink Blog",content:`2016 was an exciting year for the Apache Flink® community, and the release of Flink 1.0 in March marked the first time in Flink’s history that the community guaranteed API backward compatibility for all versions in a series. This step forward for Flink was followed by many new and exciting production deployments in organizations of all shapes and sizes, all around the globe.
 In this post, we’ll look back on the project’s progress over the course of 2016, and we’ll also preview what 2017 has in store.
 {%toc%}
 Community Growth # Github # First, here&rsquo;s a summary of community statistics from GitHub. At the time of writing:
@@ -4987,19 +4997,19 @@
 Side inputs, as described in this design document, to enable the joining of a main, high-throughput stream with one more more inputs with static or slowly-changing data.
 If you&rsquo;re interested in getting involved with Flink, we encourage you to take a look at the FLIPs and to join the discussion via the Flink mailing lists.
 Lastly, we&rsquo;d like to extend a sincere thank you to all of the Flink community for making 2016 a great year!
-`}),e.add({id:253,href:"/2016/10/12/apache-flink-1.1.3-released/",title:"Apache Flink 1.1.3 Released",section:"Flink Blog",content:`The Apache Flink community released the next bugfix version of the Apache Flink 1.1. series.
+`}),e.add({id:254,href:"/2016/10/12/apache-flink-1.1.3-released/",title:"Apache Flink 1.1.3 Released",section:"Flink Blog",content:`The Apache Flink community released the next bugfix version of the Apache Flink 1.1. series.
 We recommend all users to upgrade to Flink 1.1.3.
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.1.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.10&lt;/artifactId&gt; &lt;version&gt;1.1.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.10&lt;/artifactId&gt; &lt;version&gt;1.1.3&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
 Note for RocksDB Backend Users # It is highly recommended to use the &ldquo;fully async&rdquo; mode for the RocksDB state backend. The &ldquo;fully async&rdquo; mode will most likely allow you to easily upgrade to Flink 1.2 (via savepoints) when it is released. The &ldquo;semi async&rdquo; mode will no longer be supported by Flink 1.2.
-RocksDBStateBackend backend = new RocksDBStateBackend(&#34;...&#34;); backend.enableFullyAsyncSnapshots(); Release Notes - Flink - Version 1.1.3 # Bug [FLINK-2662] - CompilerException: &quot;Bug: Plan generation for Unions picked a ship strategy between binary plan operators.&quot; [FLINK-4311] - TableInputFormat fails when reused on next split [FLINK-4329] - Fix Streaming File Source Timestamps/Watermarks Handling [FLINK-4485] - Finished jobs in yarn session fill /tmp filesystem [FLINK-4513] - Kafka connector documentation refers to Flink 1.1-SNAPSHOT [FLINK-4514] - ExpiredIteratorException in Kinesis Consumer on long catch-ups to head of stream [FLINK-4540] - Detached job execution may prevent cluster shutdown [FLINK-4544] - TaskManager metrics are vulnerable to custom JMX bean installation [FLINK-4566] - ProducerFailedException does not properly preserve Exception causes [FLINK-4588] - Fix Merging of Covering Window in MergingWindowSet [FLINK-4589] - Fix Merging of Covering Window in MergingWindowSet [FLINK-4616] - Kafka consumer doesn&#39;t store last emmited watermarks per partition in state [FLINK-4618] - FlinkKafkaConsumer09 should start from the next record on startup from offsets in Kafka [FLINK-4619] - JobManager does not answer to client when restore from savepoint fails [FLINK-4636] - AbstractCEPPatternOperator fails to restore state [FLINK-4640] - Serialization of the initialValue of a Fold on WindowedStream fails [FLINK-4651] - Re-register processing time timers at the WindowOperator upon recovery. 
[FLINK-4663] - Flink JDBCOutputFormat logs wrong WARN message [FLINK-4672] - TaskManager accidentally decorates Kill messages [FLINK-4677] - Jars with no job executions produces NullPointerException in ClusterClient [FLINK-4702] - Kafka consumer must commit offsets asynchronously [FLINK-4727] - Kafka 0.9 Consumer should also checkpoint auto retrieved offsets even when no data is read [FLINK-4732] - Maven junction plugin security threat [FLINK-4777] - ContinuousFileMonitoringFunction may throw IOException when files are moved [FLINK-4788] - State backend class cannot be loaded, because fully qualified name converted to lower-case Improvement [FLINK-4396] - GraphiteReporter class not found at startup of jobmanager [FLINK-4574] - Strengthen fetch interval implementation in Kinesis consumer [FLINK-4723] - Unify behaviour of committed offsets to Kafka / ZK for Kafka 0.8 and 0.9 consumer `}),e.add({id:254,href:"/2016/09/05/apache-flink-1.1.2-released/",title:"Apache Flink 1.1.2 Released",section:"Flink Blog",content:`The Apache Flink community released another bugfix version of the Apache Flink 1.1. series.
+RocksDBStateBackend backend = new RocksDBStateBackend(&#34;...&#34;); backend.enableFullyAsyncSnapshots(); Release Notes - Flink - Version 1.1.3 # Bug [FLINK-2662] - CompilerException: &quot;Bug: Plan generation for Unions picked a ship strategy between binary plan operators.&quot; [FLINK-4311] - TableInputFormat fails when reused on next split [FLINK-4329] - Fix Streaming File Source Timestamps/Watermarks Handling [FLINK-4485] - Finished jobs in yarn session fill /tmp filesystem [FLINK-4513] - Kafka connector documentation refers to Flink 1.1-SNAPSHOT [FLINK-4514] - ExpiredIteratorException in Kinesis Consumer on long catch-ups to head of stream [FLINK-4540] - Detached job execution may prevent cluster shutdown [FLINK-4544] - TaskManager metrics are vulnerable to custom JMX bean installation [FLINK-4566] - ProducerFailedException does not properly preserve Exception causes [FLINK-4588] - Fix Merging of Covering Window in MergingWindowSet [FLINK-4589] - Fix Merging of Covering Window in MergingWindowSet [FLINK-4616] - Kafka consumer doesn&#39;t store last emmited watermarks per partition in state [FLINK-4618] - FlinkKafkaConsumer09 should start from the next record on startup from offsets in Kafka [FLINK-4619] - JobManager does not answer to client when restore from savepoint fails [FLINK-4636] - AbstractCEPPatternOperator fails to restore state [FLINK-4640] - Serialization of the initialValue of a Fold on WindowedStream fails [FLINK-4651] - Re-register processing time timers at the WindowOperator upon recovery. 
[FLINK-4663] - Flink JDBCOutputFormat logs wrong WARN message [FLINK-4672] - TaskManager accidentally decorates Kill messages [FLINK-4677] - Jars with no job executions produces NullPointerException in ClusterClient [FLINK-4702] - Kafka consumer must commit offsets asynchronously [FLINK-4727] - Kafka 0.9 Consumer should also checkpoint auto retrieved offsets even when no data is read [FLINK-4732] - Maven junction plugin security threat [FLINK-4777] - ContinuousFileMonitoringFunction may throw IOException when files are moved [FLINK-4788] - State backend class cannot be loaded, because fully qualified name converted to lower-case Improvement [FLINK-4396] - GraphiteReporter class not found at startup of jobmanager [FLINK-4574] - Strengthen fetch interval implementation in Kinesis consumer [FLINK-4723] - Unify behaviour of committed offsets to Kafka / ZK for Kafka 0.8 and 0.9 consumer `}),e.add({id:255,href:"/2016/09/05/apache-flink-1.1.2-released/",title:"Apache Flink 1.1.2 Released",section:"Flink Blog",content:`The Apache Flink community released another bugfix version of the Apache Flink 1.1. series.
 We recommend all users to upgrade to Flink 1.1.2.
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.1.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.10&lt;/artifactId&gt; &lt;version&gt;1.1.2&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.10&lt;/artifactId&gt; &lt;version&gt;1.1.2&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
-Release Notes - Flink - Version 1.1.2 [FLINK-4236] - Flink Dashboard stops showing list of uploaded jars if main method cannot be looked up [FLINK-4309] - Potential null pointer dereference in DelegatingConfiguration#keySet() [FLINK-4334] - Shaded Hadoop1 jar not fully excluded in Quickstart [FLINK-4341] - Kinesis connector does not emit maximum watermark properly [FLINK-4402] - Wrong metrics parameter names in documentation [FLINK-4409] - class conflict between jsr305-1.3.9.jar and flink-shaded-hadoop2-1.1.1.jar [FLINK-4411] - [py] Chained dual input children are not properly propagated [FLINK-4412] - [py] Chaining does not properly handle broadcast variables [FLINK-4425] - &quot;Out Of Memory&quot; during savepoint deserialization [FLINK-4454] - Lookups for JobManager address in config [FLINK-4480] - Incorrect link to elastic.co in documentation [FLINK-4486] - JobManager not fully running when yarn-session.sh finishes [FLINK-4488] - Prevent cluster shutdown after job execution for non-detached jobs [FLINK-4514] - ExpiredIteratorException in Kinesis Consumer on long catch-ups to head of stream [FLINK-4526] - ApplicationClient: remove redundant proxy messages [FLINK-3866] - StringArraySerializer claims type is immutable; shouldn&#39;t [FLINK-3899] - Document window processing with Reduce/FoldFunction + WindowFunction [FLINK-4302] - Add JavaDocs to MetricConfig [FLINK-4495] - Running multiple jobs on yarn (without yarn-session) `}),e.add({id:255,href:"/2016/08/24/flink-forward-2016-announcing-schedule-keynotes-and-panel-discussion/",title:"Flink Forward 2016: Announcing Schedule, Keynotes, and Panel Discussion",section:"Flink Blog",content:`An update for the Flink community: the Flink Forward 2016 schedule is now available online. This year's event will include 2 days of talks from stream processing experts at Google, MapR, Alibaba, Netflix, Cloudera, and more. Following the talks is a full day of hands-on Flink training.
+Release Notes - Flink - Version 1.1.2 [FLINK-4236] - Flink Dashboard stops showing list of uploaded jars if main method cannot be looked up [FLINK-4309] - Potential null pointer dereference in DelegatingConfiguration#keySet() [FLINK-4334] - Shaded Hadoop1 jar not fully excluded in Quickstart [FLINK-4341] - Kinesis connector does not emit maximum watermark properly [FLINK-4402] - Wrong metrics parameter names in documentation [FLINK-4409] - class conflict between jsr305-1.3.9.jar and flink-shaded-hadoop2-1.1.1.jar [FLINK-4411] - [py] Chained dual input children are not properly propagated [FLINK-4412] - [py] Chaining does not properly handle broadcast variables [FLINK-4425] - &quot;Out Of Memory&quot; during savepoint deserialization [FLINK-4454] - Lookups for JobManager address in config [FLINK-4480] - Incorrect link to elastic.co in documentation [FLINK-4486] - JobManager not fully running when yarn-session.sh finishes [FLINK-4488] - Prevent cluster shutdown after job execution for non-detached jobs [FLINK-4514] - ExpiredIteratorException in Kinesis Consumer on long catch-ups to head of stream [FLINK-4526] - ApplicationClient: remove redundant proxy messages [FLINK-3866] - StringArraySerializer claims type is immutable; shouldn&#39;t [FLINK-3899] - Document window processing with Reduce/FoldFunction + WindowFunction [FLINK-4302] - Add JavaDocs to MetricConfig [FLINK-4495] - Running multiple jobs on yarn (without yarn-session) `}),e.add({id:256,href:"/2016/08/24/flink-forward-2016-announcing-schedule-keynotes-and-panel-discussion/",title:"Flink Forward 2016: Announcing Schedule, Keynotes, and Panel Discussion",section:"Flink Blog",content:`An update for the Flink community: the Flink Forward 2016 schedule is now available online. This year's event will include 2 days of talks from stream processing experts at Google, MapR, Alibaba, Netflix, Cloudera, and more. Following the talks is a full day of hands-on Flink training.
 Ted Dunning has been announced as a keynote speaker at the event. Ted is the VP of Incubator at Apache Software Foundation, the Chief Application Architect at MapR Technologies, and a mentor on many recent projects. He'll present "How Can We Take Flink Forward?" on the second day of the conference.
 Following Ted's keynote there will be a panel discussion on "Large Scale Streaming in Production". As stream processing systems become more mainstream, companies are looking to empower their users to take advantage of this technology. We welcome leading stream processing experts Xiaowei Jiang (Alibaba), Monal Daxini (Netflix), Maxim Fateev (Uber), and Ted Dunning (MapR Technologies) on stage to talk about the challenges they have faced and the solutions they have discovered while implementing stream processing systems at very large scale. The panel will be moderated by Jamie Grier (data Artisans).
 The welcome keynote on Monday, September 12, will be presented by data Artisans' co-founders Kostas Tzoumas and Stephan Ewen. They will talk about "The maturing data streaming ecosystem and Apache Flink’s accelerated growth". In this talk, Kostas and Stephan discuss several large-scale stream processing use cases that the data Artisans team has seen over the past year.
 And one more recent addition to the program: Maxim Fateev of Uber will present "Beyond the Watermark: On-Demand Backfilling in Flink". Flink’s time-progress model is built around a single watermark, which is incompatible with Uber’s business need for generating aggregates retroactively. Maxim's talk covers Uber's solution for on-demand backfilling.
-We hope to see many community members at Flink Forward 2016. Registration is available online: flink-forward.org/registration `}),e.add({id:256,href:"/2016/08/04/announcing-apache-flink-1.1.0/",title:"Announcing Apache Flink 1.1.0",section:"Flink Blog",content:`Important: The Maven artifacts published with version 1.1.0 on Maven central have a Hadoop dependency issue. It is highly recommended to use 1.1.1 or 1.1.1-hadoop1 as the Flink version. The Apache Flink community is pleased to announce the availability of Flink 1.1.0.
+We hope to see many community members at Flink Forward 2016. Registration is available online: flink-forward.org/registration `}),e.add({id:257,href:"/2016/08/04/announcing-apache-flink-1.1.0/",title:"Announcing Apache Flink 1.1.0",section:"Flink Blog",content:`Important: The Maven artifacts published with version 1.1.0 on Maven central have a Hadoop dependency issue. It is highly recommended to use 1.1.1 or 1.1.1-hadoop1 as the Flink version. The Apache Flink community is pleased to announce the availability of Flink 1.1.0.
 This release is the first major release in the 1.X.X series of releases, which maintains API compatibility with 1.0.0. This means that your applications written against stable APIs of Flink 1.0.0 will compile and run with Flink 1.1.0. 95 contributors provided bug fixes, improvements, and new features such that in total more than 450 JIRA issues could be resolved. See the complete changelog for more details.
 We encourage everyone to download the release and check out the documentation. Feedback through the Flink mailing lists is, as always, very welcome!
 Some highlights of the release are listed in the following sections.
@@ -5027,11 +5037,11 @@
 Counter counter = getRuntimeContext() .getMetricGroup() .counter(&#34;my-counter&#34;); All registered metrics will be exposed via reporters. Out of the box, Flinks comes with support for JMX, Ganglia, Graphite, and statsD. In addition to your custom metrics, Flink exposes many internal metrics like checkpoint sizes and JVM stats.
 Check out the Metrics documentation for more details.
 List of Contributors # The following 95 people contributed to this release:
-Abdullah Ozturk Ajay Bhat Alexey Savartsov Aljoscha Krettek Andrea Sella Andrew Palumbo Chenguang He Chiwan Park David Moravek Dominik Bruhn Dyana Rose Fabian Hueske Flavio Pompermaier Gabor Gevay Gabor Horvath Geoffrey Mon Gordon Tai Greg Hogan Gyula Fora Henry Saputra Ignacio N. Lucero Ascencio Igor Berman Ismaël Mejía Ivan Mushketyk Jark Wu Jiri Simsa Jonas Traub Josh Joshi Joshua Herman Ken Krugler Konstantin Knauf Lasse Dalegaard Li Fanxi MaBiao Mao Wei Mark Reddy Martin Junghanns Martin Liesenberg Maximilian Michels Michal Fijolek Márton Balassi Nathan Howell Niels Basjes Niels Zeilemaker Phetsarath, Sourigna Robert Metzger Scott Kidder Sebastian Klemke Shahin Shannon Carey Shannon Quinn Stefan Richter Stefano Baghino Stefano Bortoli Stephan Ewen Steve Cosenza Sumit Chawla Tatu Saloranta Tianji Li Till Rohrmann Todd Lisonbee Tony Baines Trevor Grant Ufuk Celebi Vasudevan Yijie Shen Zack Pierce Zhai Jia chengxiang li chobeat danielblazevski dawid dawidwys eastcirclek erli ding gallenvara kl0u mans2singh markreddy mjsax nikste omaralvarez philippgrulich ramkrishna sahitya-pavurala samaitra smarthi spkavuly subhankar twalthr vasia xueyan.li zentol 卫乐 `}),e.add({id:257,href:"/2016/08/04/flink-1.1.1-released/",title:"Flink 1.1.1 Released",section:"Flink Blog",content:`Today, the Flink community released Flink version 1.1.1.
+Abdullah Ozturk Ajay Bhat Alexey Savartsov Aljoscha Krettek Andrea Sella Andrew Palumbo Chenguang He Chiwan Park David Moravek Dominik Bruhn Dyana Rose Fabian Hueske Flavio Pompermaier Gabor Gevay Gabor Horvath Geoffrey Mon Gordon Tai Greg Hogan Gyula Fora Henry Saputra Ignacio N. Lucero Ascencio Igor Berman Ismaël Mejía Ivan Mushketyk Jark Wu Jiri Simsa Jonas Traub Josh Joshi Joshua Herman Ken Krugler Konstantin Knauf Lasse Dalegaard Li Fanxi MaBiao Mao Wei Mark Reddy Martin Junghanns Martin Liesenberg Maximilian Michels Michal Fijolek Márton Balassi Nathan Howell Niels Basjes Niels Zeilemaker Phetsarath, Sourigna Robert Metzger Scott Kidder Sebastian Klemke Shahin Shannon Carey Shannon Quinn Stefan Richter Stefano Baghino Stefano Bortoli Stephan Ewen Steve Cosenza Sumit Chawla Tatu Saloranta Tianji Li Till Rohrmann Todd Lisonbee Tony Baines Trevor Grant Ufuk Celebi Vasudevan Yijie Shen Zack Pierce Zhai Jia chengxiang li chobeat danielblazevski dawid dawidwys eastcirclek erli ding gallenvara kl0u mans2singh markreddy mjsax nikste omaralvarez philippgrulich ramkrishna sahitya-pavurala samaitra smarthi spkavuly subhankar twalthr vasia xueyan.li zentol 卫乐 `}),e.add({id:258,href:"/2016/08/04/flink-1.1.1-released/",title:"Flink 1.1.1 Released",section:"Flink Blog",content:`Today, the Flink community released Flink version 1.1.1.
 The Maven artifacts published on Maven central for 1.1.0 had a Hadoop dependency issue: No Hadoop 1 specific version (with version 1.1.0-hadoop1) was deployed and 1.1.0 artifacts have a dependency on Hadoop 1 instead of Hadoop 2.
 This was fixed with this release and we highly recommend all users to use this version of Flink by bumping your Flink dependencies to version 1.1.1:
 &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.1.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.10&lt;/artifactId&gt; &lt;version&gt;1.1.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.10&lt;/artifactId&gt; &lt;version&gt;1.1.1&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
-`}),e.add({id:258,href:"/2016/05/24/stream-processing-for-everyone-with-sql-and-apache-flink/",title:"Stream Processing for Everyone with SQL and Apache Flink",section:"Flink Blog",content:`The capabilities of open source systems for distributed stream processing have evolved significantly over the last years. Initially, the first systems in the field (notably Apache Storm) provided low latency processing, but were limited to at-least-once guarantees, processing-time semantics, and rather low-level APIs. Since then, several new systems emerged and pushed the state of the art of open source stream processing in several dimensions. Today, users of Apache Flink or Apache Beam can use fluent Scala and Java APIs to implement stream processing jobs that operate in event-time with exactly-once semantics at high throughput and low latency.
+`}),e.add({id:259,href:"/2016/05/24/stream-processing-for-everyone-with-sql-and-apache-flink/",title:"Stream Processing for Everyone with SQL and Apache Flink",section:"Flink Blog",content:`The capabilities of open source systems for distributed stream processing have evolved significantly over the last years. Initially, the first systems in the field (notably Apache Storm) provided low latency processing, but were limited to at-least-once guarantees, processing-time semantics, and rather low-level APIs. Since then, several new systems emerged and pushed the state of the art of open source stream processing in several dimensions. Today, users of Apache Flink or Apache Beam can use fluent Scala and Java APIs to implement stream processing jobs that operate in event-time with exactly-once semantics at high throughput and low latency.
 In the meantime, stream processing has taken off in the industry. We are witnessing a rapidly growing interest in stream processing which is reflected by prevalent deployments of streaming processing infrastructure such as Apache Kafka and Apache Flink. The increasing number of available data streams results in a demand for people that can analyze streaming data and turn it into real-time insights. However, stream data analysis requires a special skill set including knowledge of streaming concepts such as the characteristics of unbounded streams, windows, time, and state as well as the skills to implement stream analysis jobs usually against Java or Scala APIs. People with this skill set are rare and hard to find.
 About six months ago, the Apache Flink community started an effort to add a SQL interface for stream data analysis. SQL is the standard language to access and process data. Everybody who occasionally analyzes data is familiar with SQL. Consequently, a SQL interface for stream data processing will make this technology accessible to a much wider audience. Moreover, SQL support for streaming data will also enable new use cases such as interactive and ad-hoc stream analysis and significantly simplify many applications including stream ingestion and simple transformations. In this blog post, we report on the current status, architectural design, and future plans of the Apache Flink community to implement support for SQL as a language for analyzing data streams.
 Where did we come from? # With the 0.9.0-milestone1 release, Apache Flink added an API to process relational data with SQL-like expressions called the Table API. The central concept of this API is a Table, a structured data set or stream on which relational operations can be applied. The Table API is tightly integrated with the DataSet and DataStream API. A Table can be easily created from a DataSet or DataStream and can also be converted back into a DataSet or DataStream as the following example shows
@@ -5050,14 +5060,14 @@
 SQL (following the syntax proposal of Calcite’s streaming SQL document) # SELECT STREAM TUMBLE_END(time, INTERVAL &#39;1&#39; DAY) AS day, location AS room, AVG((tempF - 32) * 0.556) AS avgTempC FROM sensorData WHERE location LIKE &#39;room%&#39; GROUP BY TUMBLE(time, INTERVAL &#39;1&#39; DAY), location Table API # val avgRoomTemp: Table = tableEnv.ingest(&#34;sensorData&#34;) .where(&#39;location.like(&#34;room%&#34;)) .partitionBy(&#39;location) .window(Tumbling every Days(1) on &#39;time as &#39;w) .select(&#39;w.end, &#39;location, , ((&#39;tempF - 32) * 0.556).avg as &#39;avgTempCs) What&rsquo;s up next? # The Flink community is actively working on SQL support for the next minor version Flink 1.1.0. In the first version, SQL (and Table API) queries on streams will be limited to selection, filter, and union operators. Compared to Flink 1.0.0, the revised Table API will support many more scalar functions and be able to read tables from external sources and write them back to external sinks. A lot of work went into reworking the architecture of the Table API and integrating Apache Calcite.
 In Flink 1.2.0, the feature set of SQL on streams will be significantly extended. Among other things, we plan to support different types of window aggregates and maybe also streaming joins. For this effort, we want to closely collaborate with the Apache Calcite community and help extending Calcite&rsquo;s support for relational operations on streaming data when necessary.
 If this post made you curious and you want to try out Flink’s SQL interface and the new Table API, we encourage you to do so! Simply clone the SNAPSHOT master branch and check out the Table API documentation for the SNAPSHOT version. Please note that the branch is under heavy development, and hence some code examples in this blog post might not work. We are looking forward to your feedback and welcome contributions.
-`}),e.add({id:259,href:"/2016/05/11/flink-1.0.3-released/",title:"Flink 1.0.3 Released",section:"Flink Blog",content:`Today, the Flink community released Flink version 1.0.3, the third bugfix release of the 1.0 series.
+`}),e.add({id:260,href:"/2016/05/11/flink-1.0.3-released/",title:"Flink 1.0.3 Released",section:"Flink Blog",content:`Today, the Flink community released Flink version 1.0.3, the third bugfix release of the 1.0 series.
 We recommend all users updating to this release by bumping the version of your Flink dependencies to 1.0.3 and updating the binaries on the server. You can find the binaries on the updated Downloads page.
-Fixed Issues # Bug # [FLINK-3790] [streaming] Use proper hadoop config in rolling sink [FLINK-3840] Remove Testing Files in RocksDB Backend [FLINK-3835] [optimizer] Add input id to JSON plan to resolve ambiguous input names [hotfix] OptionSerializer.duplicate to respect stateful element serializer [FLINK-3803] [runtime] Pass CheckpointStatsTracker to ExecutionGraph [hotfix] [cep] Make cep window border treatment consistent Improvement # [FLINK-3678] [dist, docs] Make Flink logs directory configurable Docs # [docs] Add note about S3AFileSystem &lsquo;buffer.dir&rsquo; property [docs] Update AWS S3 docs Tests # [FLINK-3860] [connector-wikiedits] Add retry loop to WikipediaEditsSourceTest [streaming-contrib] Fix port clash in DbStateBackend tests `}),e.add({id:260,href:"/2016/04/22/flink-1.0.2-released/",title:"Flink 1.0.2 Released",section:"Flink Blog",content:`Today, the Flink community released Flink version 1.0.2, the second bugfix release of the 1.0 series.
+Fixed Issues # Bug # [FLINK-3790] [streaming] Use proper hadoop config in rolling sink [FLINK-3840] Remove Testing Files in RocksDB Backend [FLINK-3835] [optimizer] Add input id to JSON plan to resolve ambiguous input names [hotfix] OptionSerializer.duplicate to respect stateful element serializer [FLINK-3803] [runtime] Pass CheckpointStatsTracker to ExecutionGraph [hotfix] [cep] Make cep window border treatment consistent Improvement # [FLINK-3678] [dist, docs] Make Flink logs directory configurable Docs # [docs] Add note about S3AFileSystem &lsquo;buffer.dir&rsquo; property [docs] Update AWS S3 docs Tests # [FLINK-3860] [connector-wikiedits] Add retry loop to WikipediaEditsSourceTest [streaming-contrib] Fix port clash in DbStateBackend tests `}),e.add({id:261,href:"/2016/04/22/flink-1.0.2-released/",title:"Flink 1.0.2 Released",section:"Flink Blog",content:`Today, the Flink community released Flink version 1.0.2, the second bugfix release of the 1.0 series.
 We recommend all users updating to this release by bumping the version of your Flink dependencies to 1.0.2 and updating the binaries on the server. You can find the binaries on the updated Downloads page.
-Fixed Issues # Bug # [FLINK-3657] [dataSet] Change access of DataSetUtils.countElements() to &lsquo;public&rsquo; [FLINK-3762] [core] Enable Kryo reference tracking [FLINK-3732] [core] Fix potential null deference in ExecutionConfig#equals() [FLINK-3760] Fix StateDescriptor.readObject [FLINK-3730] Fix RocksDB Local Directory Initialization [FLINK-3712] Make all dynamic properties available to the CLI frontend [FLINK-3688] WindowOperator.trigger() does not emit Watermark anymore [FLINK-3697] Properly access type information for nested POJO key selection Improvement # [FLINK-3654] Disable Write-Ahead-Log in RocksDB State Docs # [FLINK-2544] [docs] Add Java 8 version for building PowerMock tests to docs [FLINK-3469] [docs] Improve documentation for grouping keys [FLINK-3634] [docs] Fix documentation for DataSetUtils.zipWithUniqueId() [FLINK-3711][docs] Documentation of Scala fold()() uses correct syntax Tests # [FLINK-3716] [kafka consumer] Decreasing socket timeout so testFailOnNoBroker() will pass before JUnit timeout `}),e.add({id:261,href:"/2016/04/14/flink-forward-2016-call-for-submissions-is-now-open/",title:"Flink Forward 2016 Call for Submissions Is Now Open",section:"Flink Blog",content:`We are happy to announce that the call for submissions for Flink Forward 2016 is now open! The conference will take place September 12-14, 2016 in Berlin, Germany, bringing together the open source stream processing community. Most Apache Flink committers will attend the conference, making it the ideal venue to learn more about the project and its roadmap and connect with the community.
+Fixed Issues # Bug # [FLINK-3657] [dataSet] Change access of DataSetUtils.countElements() to &lsquo;public&rsquo; [FLINK-3762] [core] Enable Kryo reference tracking [FLINK-3732] [core] Fix potential null deference in ExecutionConfig#equals() [FLINK-3760] Fix StateDescriptor.readObject [FLINK-3730] Fix RocksDB Local Directory Initialization [FLINK-3712] Make all dynamic properties available to the CLI frontend [FLINK-3688] WindowOperator.trigger() does not emit Watermark anymore [FLINK-3697] Properly access type information for nested POJO key selection Improvement # [FLINK-3654] Disable Write-Ahead-Log in RocksDB State Docs # [FLINK-2544] [docs] Add Java 8 version for building PowerMock tests to docs [FLINK-3469] [docs] Improve documentation for grouping keys [FLINK-3634] [docs] Fix documentation for DataSetUtils.zipWithUniqueId() [FLINK-3711][docs] Documentation of Scala fold()() uses correct syntax Tests # [FLINK-3716] [kafka consumer] Decreasing socket timeout so testFailOnNoBroker() will pass before JUnit timeout `}),e.add({id:262,href:"/2016/04/14/flink-forward-2016-call-for-submissions-is-now-open/",title:"Flink Forward 2016 Call for Submissions Is Now Open",section:"Flink Blog",content:`We are happy to announce that the call for submissions for Flink Forward 2016 is now open! The conference will take place September 12-14, 2016 in Berlin, Germany, bringing together the open source stream processing community. Most Apache Flink committers will attend the conference, making it the ideal venue to learn more about the project and its roadmap and connect with the community.
 The conference welcomes submissions on everything Flink-related, including experiences with using Flink, products based on Flink, technical talks on extending Flink, as well as connecting Flink with other open source or proprietary software.
 Read more here.
-`}),e.add({id:262,href:"/2016/04/06/introducing-complex-event-processing-cep-with-apache-flink/",title:"Introducing Complex Event Processing (CEP) with Apache Flink",section:"Flink Blog",content:`With the ubiquity of sensor networks and smart devices continuously collecting more and more data, we face the challenge to analyze an ever growing stream of data in near real-time. Being able to react quickly to changing trends or to deliver up to date business intelligence can be a decisive factor for a company’s success or failure. A key problem in real time processing is the detection of event patterns in data streams.
+`}),e.add({id:263,href:"/2016/04/06/introducing-complex-event-processing-cep-with-apache-flink/",title:"Introducing Complex Event Processing (CEP) with Apache Flink",section:"Flink Blog",content:`With the ubiquity of sensor networks and smart devices continuously collecting more and more data, we face the challenge to analyze an ever growing stream of data in near real-time. Being able to react quickly to changing trends or to deliver up to date business intelligence can be a decisive factor for a company’s success or failure. A key problem in real time processing is the detection of event patterns in data streams.
 Complex event processing (CEP) addresses exactly this problem of matching continuously incoming events against a pattern. The result of a matching are usually complex events which are derived from the input events. In contrast to traditional DBMSs where a query is executed on stored data, CEP executes data on a stored query. All data which is not relevant for the query can be immediately discarded. The advantages of this approach are obvious, given that CEP queries are applied on a potentially infinite stream of data. Furthermore, inputs are processed immediately. Once the system has seen all events for a matching sequence, results are emitted straight away. This aspect effectively leads to CEP’s real time analytics capability.
 Consequently, CEP’s processing paradigm drew significant interest and found application in a wide variety of use cases. Most notably, CEP is used nowadays for financial applications such as stock market trend and credit card fraud detection. Moreover, it is used in RFID-based tracking and monitoring, for example, to detect thefts in a warehouse where items are not properly checked out. CEP can also be used to detect network intrusion by specifying patterns of suspicious user behaviour.
 Apache Flink with its true streaming nature and its capabilities for low latency as well as high throughput stream processing is a natural fit for CEP workloads. Consequently, the Flink community has introduced the first version of a new CEP library with Flink 1.0. In the remainder of this blog post, we introduce Flink’s CEP library and we illustrate its ease of use through the example of monitoring a data center.
@@ -5081,9 +5091,9 @@
 Conclusion # In this blog post we have seen how easy it is to reason about event streams using Flink’s CEP library. Using the example of monitoring and alert generation for a data center, we have implemented a short program which notifies us when a rack is about to overheat and potentially to fail.
 In the future, the Flink community will further extend the CEP library’s functionality and expressiveness. Next on the road map is support for a regular expression-like pattern specification, including Kleene star, lower and upper bounds, and negation. Furthermore, it is planned to allow the where-clause to access fields of previously matched events. This feature will allow to prune unpromising event sequences early.
 Note: The example code requires Flink 1.0.1 or higher.
-`}),e.add({id:263,href:"/2016/04/06/flink-1.0.1-released/",title:"Flink 1.0.1 Released",section:"Flink Blog",content:`Today, the Flink community released Flink version 1.0.1, the first bugfix release of the 1.0 series.
+`}),e.add({id:264,href:"/2016/04/06/flink-1.0.1-released/",title:"Flink 1.0.1 Released",section:"Flink Blog",content:`Today, the Flink community released Flink version 1.0.1, the first bugfix release of the 1.0 series.
 We recommend all users updating to this release by bumping the version of your Flink dependencies to 1.0.1 and updating the binaries on the server. You can find the binaries on the updated Downloads page.
-Fixed Issues # Bug [FLINK-3179] - Combiner is not injected if Reduce or GroupReduce input is explicitly partitioned [FLINK-3472] - JDBCInputFormat.nextRecord(..) has misleading message on NPE [FLINK-3491] - HDFSCopyUtilitiesTest fails on Windows [FLINK-3495] - RocksDB Tests can&#39;t run on Windows [FLINK-3533] - Update the Gelly docs wrt examples and cluster execution [FLINK-3563] - .returns() doesn&#39;t compile when using .map() with a custom MapFunction [FLINK-3566] - Input type validation often fails on custom TypeInfo implementations [FLINK-3578] - Scala DataStream API does not support Rich Window Functions [FLINK-3595] - Kafka09 consumer thread does not interrupt when stuck in record emission [FLINK-3602] - Recursive Types are not supported / crash TypeExtractor [FLINK-3621] - Misleading documentation of memory configuration parameters [FLINK-3629] - In wikiedits Quick Start example, &quot;The first call, .window()&quot; should be &quot;The first call, .timeWindow()&quot; [FLINK-3651] - Fix faulty RollingSink Restore [FLINK-3653] - recovery.zookeeper.storageDir is not documented on the configuration page [FLINK-3663] - FlinkKafkaConsumerBase.logPartitionInfo is missing a log marker [FLINK-3681] - CEP library does not support Java 8 lambdas as select function [FLINK-3682] - CEP operator does not set the processing timestamp correctly [FLINK-3684] - CEP operator does not forward watermarks properly Improvement [FLINK-3570] - Replace random NIC selection heuristic by InetAddress.getLocalHost [FLINK-3575] - Update Working With State Section in Doc [FLINK-3591] - Replace Quickstart K-Means Example by Streaming Example Test [FLINK-2444] - Add tests for HadoopInputFormats [FLINK-2445] - Add tests for HadoopOutputFormats `}),e.add({id:264,href:"/2016/03/08/announcing-apache-flink-1.0.0/",title:"Announcing Apache Flink 1.0.0",section:"Flink Blog",content:`The Apache Flink community is pleased to announce the availability of the 1.0.0 release. 
The community put significant effort into improving and extending Apache Flink since the last release, focusing on improving the experience of writing and executing data stream processing pipelines in production.
+Fixed Issues # Bug [FLINK-3179] - Combiner is not injected if Reduce or GroupReduce input is explicitly partitioned [FLINK-3472] - JDBCInputFormat.nextRecord(..) has misleading message on NPE [FLINK-3491] - HDFSCopyUtilitiesTest fails on Windows [FLINK-3495] - RocksDB Tests can&#39;t run on Windows [FLINK-3533] - Update the Gelly docs wrt examples and cluster execution [FLINK-3563] - .returns() doesn&#39;t compile when using .map() with a custom MapFunction [FLINK-3566] - Input type validation often fails on custom TypeInfo implementations [FLINK-3578] - Scala DataStream API does not support Rich Window Functions [FLINK-3595] - Kafka09 consumer thread does not interrupt when stuck in record emission [FLINK-3602] - Recursive Types are not supported / crash TypeExtractor [FLINK-3621] - Misleading documentation of memory configuration parameters [FLINK-3629] - In wikiedits Quick Start example, &quot;The first call, .window()&quot; should be &quot;The first call, .timeWindow()&quot; [FLINK-3651] - Fix faulty RollingSink Restore [FLINK-3653] - recovery.zookeeper.storageDir is not documented on the configuration page [FLINK-3663] - FlinkKafkaConsumerBase.logPartitionInfo is missing a log marker [FLINK-3681] - CEP library does not support Java 8 lambdas as select function [FLINK-3682] - CEP operator does not set the processing timestamp correctly [FLINK-3684] - CEP operator does not forward watermarks properly Improvement [FLINK-3570] - Replace random NIC selection heuristic by InetAddress.getLocalHost [FLINK-3575] - Update Working With State Section in Doc [FLINK-3591] - Replace Quickstart K-Means Example by Streaming Example Test [FLINK-2444] - Add tests for HadoopInputFormats [FLINK-2445] - Add tests for HadoopOutputFormats `}),e.add({id:265,href:"/2016/03/08/announcing-apache-flink-1.0.0/",title:"Announcing Apache Flink 1.0.0",section:"Flink Blog",content:`The Apache Flink community is pleased to announce the availability of the 1.0.0 release. 
The community put significant effort into improving and extending Apache Flink since the last release, focusing on improving the experience of writing and executing data stream processing pipelines in production.
 Flink version 1.0.0 marks the beginning of the 1.X.X series of releases, which will maintain backwards compatibility with 1.0.0. This means that applications written against stable APIs of Flink 1.0.0 will compile and run with all Flink versions in the 1. series. This is the first time we are formally guaranteeing compatibility in Flink&rsquo;s history, and we therefore see this release as a major milestone of the project, perhaps the most important since graduation as a top-level project.
 Apart from backwards compatibility, Flink 1.0.0 brings a variety of new user-facing features, as well as tons of bug fixes. About 64 contributors provided bug fixes, improvements, and new features such that in total more than 450 JIRA issues could be resolved.
 We encourage everyone to download the release and check out the documentation. Feedback through the Flink mailing lists is, as always, very welcome!
@@ -5098,9 +5108,9 @@
 The checkpoint coordinator now exposes statistics through our REST monitoring API and the web interface. Users can review the checkpoint size and duration on a per-operator basis and see the last completed checkpoints. This is helpful for identifying performance issues, such as processing slowdown by the checkpoints.
 Improved Kafka connector and support for Kafka 0.9 # Flink 1.0 supports both Kafka 0.8 and 0.9. With the new release, Flink exposes Kafka metrics for the producers and the 0.9 consumer through Flink’s accumulator system. We also enhanced the existing connector for Kafka 0.8, allowing users to subscribe to multiple topics in one source.
 Changelog and known issues # This release resolves more than 450 issues, including bug fixes, improvements, and new features. See the complete changelog and known issues.
-List of contributors # Abhishek Agarwal Ajay Bhat Aljoscha Krettek Andra Lungu Andrea Sella Chesnay Schepler Chiwan Park Daniel Pape Fabian Hueske Filipe Correia Frederick F. Kautz IV Gabor Gevay Gabor Horvath Georgios Andrianakis Greg Hogan Gyula Fora Henry Saputra Hilmi Yildirim Hubert Czerpak Jark Wu Johannes Jun Aoki Jun Aoki Kostas Kloudas Li Chengxiang Lun Gao Martin Junghanns Martin Liesenberg Matthias J. Sax Maximilian Michels Márton Balassi Nick Dimiduk Niels Basjes Omer Katz Paris Carbone Patrice Freydiere Peter Vandenabeele Piotr Godek Prez Cannady Robert Metzger Romeo Kienzler Sachin Goel Saumitra Shahapure Sebastian Klemke Stefano Baghino Stephan Ewen Stephen Samuel Subhobrata Dey Suneel Marthi Ted Yu Theodore Vasiloudis Till Rohrmann Timo Walther Trevor Grant Ufuk Celebi Ulf Karlsson Vasia Kalavri fversaci madhukar qingmeng.wyh ramkrishna rtudoran sahitya-pavurala zhangminglei `}),e.add({id:265,href:"/2016/02/11/flink-0.10.2-released/",title:"Flink 0.10.2 Released",section:"Flink Blog",content:`Today, the Flink community released Flink version 0.10.2, the second bugfix release of the 0.10 series.
+List of contributors # Abhishek Agarwal Ajay Bhat Aljoscha Krettek Andra Lungu Andrea Sella Chesnay Schepler Chiwan Park Daniel Pape Fabian Hueske Filipe Correia Frederick F. Kautz IV Gabor Gevay Gabor Horvath Georgios Andrianakis Greg Hogan Gyula Fora Henry Saputra Hilmi Yildirim Hubert Czerpak Jark Wu Johannes Jun Aoki Jun Aoki Kostas Kloudas Li Chengxiang Lun Gao Martin Junghanns Martin Liesenberg Matthias J. Sax Maximilian Michels Márton Balassi Nick Dimiduk Niels Basjes Omer Katz Paris Carbone Patrice Freydiere Peter Vandenabeele Piotr Godek Prez Cannady Robert Metzger Romeo Kienzler Sachin Goel Saumitra Shahapure Sebastian Klemke Stefano Baghino Stephan Ewen Stephen Samuel Subhobrata Dey Suneel Marthi Ted Yu Theodore Vasiloudis Till Rohrmann Timo Walther Trevor Grant Ufuk Celebi Ulf Karlsson Vasia Kalavri fversaci madhukar qingmeng.wyh ramkrishna rtudoran sahitya-pavurala zhangminglei `}),e.add({id:266,href:"/2016/02/11/flink-0.10.2-released/",title:"Flink 0.10.2 Released",section:"Flink Blog",content:`Today, the Flink community released Flink version 0.10.2, the second bugfix release of the 0.10 series.
 We recommend all users updating to this release by bumping the version of your Flink dependencies to 0.10.2 and updating the binaries on the server.
-Issues fixed # FLINK-3242: Adjust StateBackendITCase for 0.10 signatures of state backends FLINK-3236: Flink user code classloader as parent classloader from Flink core classes FLINK-2962: Cluster startup script refers to unused variable FLINK-3151: Downgrade to Netty version 4.0.27.Final FLINK-3224: Call setInputType() on output formats that implement InputTypeConfigurable FLINK-3218: Fix overriding of user parameters when merging Hadoop configurations FLINK-3189: Fix argument parsing of CLI client INFO action FLINK-3176: Improve documentation for window apply FLINK-3185: Log error on failure during recovery FLINK-3185: Don&rsquo;t swallow test failure Exception FLINK-3147: Expose HadoopOutputFormatBase fields as protected FLINK-3145: Pin Kryo version of transitive dependencies FLINK-3143: Update Closure Cleaner&rsquo;s ASM references to ASM5 FLINK-3136: Fix shaded imports in ClosureCleaner.scala FLINK-3108: JoinOperator&rsquo;s with() calls the wrong TypeExtractor method FLINK-3125: Web server starts also when JobManager log files cannot be accessed. FLINK-3080: Relax restrictions of DataStream.union() FLINK-3081: Properly stop periodic Kafka committer FLINK-3082: Fixed confusing error about an interface that no longer exists FLINK-3067: Enforce zkclient 0.7 for Kafka FLINK-3020: Set number of task slots to maximum parallelism in local execution `}),e.add({id:266,href:"/2015/12/18/flink-2015-a-year-in-review-and-a-lookout-to-2016/",title:"Flink 2015: A year in review, and a lookout to 2016",section:"Flink Blog",content:`With 2015 ending, we thought that this would be good time to reflect on the amazing work done by the Flink community over this past year, and how much this community has grown.
+Issues fixed # FLINK-3242: Adjust StateBackendITCase for 0.10 signatures of state backends FLINK-3236: Flink user code classloader as parent classloader from Flink core classes FLINK-2962: Cluster startup script refers to unused variable FLINK-3151: Downgrade to Netty version 4.0.27.Final FLINK-3224: Call setInputType() on output formats that implement InputTypeConfigurable FLINK-3218: Fix overriding of user parameters when merging Hadoop configurations FLINK-3189: Fix argument parsing of CLI client INFO action FLINK-3176: Improve documentation for window apply FLINK-3185: Log error on failure during recovery FLINK-3185: Don&rsquo;t swallow test failure Exception FLINK-3147: Expose HadoopOutputFormatBase fields as protected FLINK-3145: Pin Kryo version of transitive dependencies FLINK-3143: Update Closure Cleaner&rsquo;s ASM references to ASM5 FLINK-3136: Fix shaded imports in ClosureCleaner.scala FLINK-3108: JoinOperator&rsquo;s with() calls the wrong TypeExtractor method FLINK-3125: Web server starts also when JobManager log files cannot be accessed. FLINK-3080: Relax restrictions of DataStream.union() FLINK-3081: Properly stop periodic Kafka committer FLINK-3082: Fixed confusing error about an interface that no longer exists FLINK-3067: Enforce zkclient 0.7 for Kafka FLINK-3020: Set number of task slots to maximum parallelism in local execution `}),e.add({id:267,href:"/2015/12/18/flink-2015-a-year-in-review-and-a-lookout-to-2016/",title:"Flink 2015: A year in review, and a lookout to 2016",section:"Flink Blog",content:`With 2015 ending, we thought that this would be good time to reflect on the amazing work done by the Flink community over this past year, and how much this community has grown.
 Overall, we have seen Flink grow in terms of functionality from an engine to one of the most complete open-source stream processing frameworks available. The community grew from a relatively small and geographically focused team, to a truly global, and one of the largest big data communities in the the Apache Software Foundation.
 We will also look at some interesting stats, including that the busiest days for Flink are Mondays (who would have thought :-).
 Community growth # Let us start with some simple statistics from Flink&rsquo;s github repository. During 2015, the Flink community doubled in size, from about 75 contributors to over 150. Forks of the repository more than tripled from 160 in February 2015 to 544 in December 2015, and the number of stars of the repository almost tripled from 289 to 813.
@@ -5123,7 +5133,7 @@
 Security: encrypt both the messages exchanged between TaskManagers and JobManager, as well as the connections for data exchange between workers.
 More streaming connectors, more runtime metrics, and continuous DataStream API enhancements: add support for more sources and sinks (e.g., Amazon Kinesis, Cassandra, Flume, etc), expose more metrics to the user, and provide continuous improvements to the DataStream API.
 If you are interested in these features, we highly encourage you to take a look at the current draft, and join the discussion on the Flink mailing lists.
-`}),e.add({id:267,href:"/2015/12/11/storm-compatibility-in-apache-flink-how-to-run-existing-storm-topologies-on-flink/",title:"Storm Compatibility in Apache Flink: How to run existing Storm topologies on Flink",section:"Flink Blog",content:`Apache Storm was one of the first distributed and scalable stream processing systems available in the open source space offering (near) real-time tuple-by-tuple processing semantics. Initially released by the developers at Backtype in 2011 under the Eclipse open-source license, it became popular very quickly. Only shortly afterwards, Twitter acquired Backtype. Since then, Storm has been growing in popularity, is used in production at many big companies, and is the de-facto industry standard for big data stream processing. In 2013, Storm entered the Apache incubator program, followed by its graduation to top-level in 2014.
+`}),e.add({id:268,href:"/2015/12/11/storm-compatibility-in-apache-flink-how-to-run-existing-storm-topologies-on-flink/",title:"Storm Compatibility in Apache Flink: How to run existing Storm topologies on Flink",section:"Flink Blog",content:`Apache Storm was one of the first distributed and scalable stream processing systems available in the open source space offering (near) real-time tuple-by-tuple processing semantics. Initially released by the developers at Backtype in 2011 under the Eclipse open-source license, it became popular very quickly. Only shortly afterwards, Twitter acquired Backtype. Since then, Storm has been growing in popularity, is used in production at many big companies, and is the de-facto industry standard for big data stream processing. In 2013, Storm entered the Apache incubator program, followed by its graduation to top-level in 2014.
 Apache Flink is a stream processing engine that improves upon older technologies like Storm in several dimensions, including strong consistency guarantees (&ldquo;exactly once&rdquo;), a higher level DataStream API, support for event time and a rich windowing system, as well as superior throughput with competitive low latency.
 While Flink offers several technical benefits over Storm, an existing investment on a codebase of applications developed for Storm often makes it difficult to switch engines. For these reasons, as part of the Flink 0.10 release, Flink ships with a Storm compatibility package that allows users to:
 Run unmodified Storm topologies using Apache Flink benefiting from superior performance. Embed Storm code (spouts and bolts) as operators inside Flink DataStream programs. Only minor code changes are required in order to submit the program to Flink instead of Storm. This minimizes the work for developers to run existing Storm topologies while leveraging Apache Flink’s fast and robust execution engine.
@@ -5140,7 +5150,7 @@
 Summary # Flink&rsquo;s compatibility package for Storm allows using unmodified Spouts and Bolts within Flink. This enables you to even embed third-party Spouts and Bolts where the source code is not available. While you can embed Spouts/Bolts in a Flink program and mix-and-match them with Flink operators, running whole topologies is the easiest way to get started and can be achieved with almost no code changes.
 If you want to try out Flink&rsquo;s Storm compatibility package checkout our Documentation.
 1. We confess, there are three lines changed compared to a Storm project &mdash;because the example covers local and remote execution. ↩
-`}),e.add({id:268,href:"/2015/12/04/introducing-stream-windows-in-apache-flink/",title:"Introducing Stream Windows in Apache Flink",section:"Flink Blog",content:`The data analysis space is witnessing an evolution from batch to stream processing for many use cases. Although batch can be handled as a special case of stream processing, analyzing never-ending streaming data often requires a shift in the mindset and comes with its own terminology (for example, “windowing” and “at-least-once”/”exactly-once” processing). This shift and the new terminology can be quite confusing for people being new to the space of stream processing. Apache Flink is a production-ready stream processor with an easy-to-use yet very expressive API to define advanced stream analysis programs. Flink&rsquo;s API features very flexible window definitions on data streams which let it stand out among other open source stream processors.
+`}),e.add({id:269,href:"/2015/12/04/introducing-stream-windows-in-apache-flink/",title:"Introducing Stream Windows in Apache Flink",section:"Flink Blog",content:`The data analysis space is witnessing an evolution from batch to stream processing for many use cases. Although batch can be handled as a special case of stream processing, analyzing never-ending streaming data often requires a shift in the mindset and comes with its own terminology (for example, “windowing” and “at-least-once”/”exactly-once” processing). This shift and the new terminology can be quite confusing for people being new to the space of stream processing. Apache Flink is a production-ready stream processor with an easy-to-use yet very expressive API to define advanced stream analysis programs. Flink&rsquo;s API features very flexible window definitions on data streams which let it stand out among other open source stream processors.
 In this blog post, we discuss the concept of windows for stream processing, present Flink&rsquo;s built-in windows, and explain its support for custom windowing semantics.
 What are windows and what are they good for? # Consider the example of a traffic sensor that counts every 15 seconds the number of vehicles passing a certain location. The resulting stream could look like:
 If you would like to know, how many vehicles passed that location, you would simply sum the individual counts. However, the nature of a sensor stream is that it continuously produces data. Such a stream never ends and it is not possible to compute a final sum that can be returned. Instead, it is possible to compute rolling sums, i.e., return for each input event an updated sum record. This would yield a new stream of partial sums.
@@ -5167,9 +5177,9 @@
 // specify an optional evictor windowed = windowed .evictor(myEvictor: Evictor[IN, WINDOW]) Finally, we apply a WindowFunction that returns elements of type OUT to obtain a DataStream[OUT].
 // apply window function to windowed stream val output: DataStream[OUT] = windowed .apply(myWinFunc: WindowFunction[IN, OUT, KEY, WINDOW]) With Flink&rsquo;s internal windowing mechanics and its exposure through the DataStream API it is possible to implement very custom windowing logic such as session windows or windows that emit early results if the values exceed a certain threshold.
 Conclusion # Support for various types of windows over continuous data streams is a must-have for modern stream processors. Apache Flink is a stream processor with a very strong feature set, including a very flexible mechanism to build and evaluate windows over continuous data streams. Flink provides pre-defined window operators for common uses cases as well as a toolbox that allows to define very custom windowing logic. The Flink community will add more pre-defined window operators as we learn the requirements from our users.
-`}),e.add({id:269,href:"/2015/11/27/flink-0.10.1-released/",title:"Flink 0.10.1 released",section:"Flink Blog",content:`Today, the Flink community released the first bugfix release of the 0.10 series of Flink.
+`}),e.add({id:270,href:"/2015/11/27/flink-0.10.1-released/",title:"Flink 0.10.1 released",section:"Flink Blog",content:`Today, the Flink community released the first bugfix release of the 0.10 series of Flink.
 We recommend all users updating to this release, by bumping the version of your Flink dependencies and updating the binaries on the server.
-Issues fixed # [FLINK-2879] - Links in documentation are broken [FLINK-2938] - Streaming docs not in sync with latest state changes [FLINK-2942] - Dangling operators in web UI&#39;s program visualization (non-deterministic) [FLINK-2967] - TM address detection might not always detect the right interface on slow networks / overloaded JMs [FLINK-2977] - Cannot access HBase in a Kerberos secured Yarn cluster [FLINK-2987] - Flink 0.10 fails to start on YARN 2.6.0 [FLINK-2989] - Job Cancel button doesn&#39;t work on Yarn [FLINK-3005] - Commons-collections object deserialization remote command execution vulnerability [FLINK-3011] - Cannot cancel failing/restarting streaming job from the command line [FLINK-3019] - CLI does not list running/restarting jobs [FLINK-3020] - Local streaming execution: set number of task manager slots to the maximum parallelism [FLINK-3024] - TimestampExtractor Does not Work When returning Long.MIN_VALUE [FLINK-3032] - Flink does not start on Hadoop 2.7.1 (HDP), due to class conflict [FLINK-3043] - Kafka Connector description in Streaming API guide is wrong/outdated [FLINK-3047] - Local batch execution: set number of task manager slots to the maximum parallelism [FLINK-3052] - Optimizer does not push properties out of bulk iterations [FLINK-2966] - Improve the way job duration is reported on web frontend. [FLINK-2974] - Add periodic offset commit to Kafka Consumer if checkpointing is disabled [FLINK-3028] - Cannot cancel restarting job via web frontend [FLINK-3040] - Add docs describing how to configure State Backends [FLINK-3041] - Twitter Streaming Description section of Streaming Programming guide refers to an incorrect example &#39;TwitterLocal&#39; `}),e.add({id:270,href:"/2015/11/16/announcing-apache-flink-0.10.0/",title:"Announcing Apache Flink 0.10.0",section:"Flink Blog",content:`The Apache Flink community is pleased to announce the availability of the 0.10.0 release. 
The community put significant effort into improving and extending Apache Flink since the last release, focusing on data stream processing and operational features. About 80 contributors provided bug fixes, improvements, and new features such that in total more than 400 JIRA issues could be resolved.
+Issues fixed # [FLINK-2879] - Links in documentation are broken [FLINK-2938] - Streaming docs not in sync with latest state changes [FLINK-2942] - Dangling operators in web UI&#39;s program visualization (non-deterministic) [FLINK-2967] - TM address detection might not always detect the right interface on slow networks / overloaded JMs [FLINK-2977] - Cannot access HBase in a Kerberos secured Yarn cluster [FLINK-2987] - Flink 0.10 fails to start on YARN 2.6.0 [FLINK-2989] - Job Cancel button doesn&#39;t work on Yarn [FLINK-3005] - Commons-collections object deserialization remote command execution vulnerability [FLINK-3011] - Cannot cancel failing/restarting streaming job from the command line [FLINK-3019] - CLI does not list running/restarting jobs [FLINK-3020] - Local streaming execution: set number of task manager slots to the maximum parallelism [FLINK-3024] - TimestampExtractor Does not Work When returning Long.MIN_VALUE [FLINK-3032] - Flink does not start on Hadoop 2.7.1 (HDP), due to class conflict [FLINK-3043] - Kafka Connector description in Streaming API guide is wrong/outdated [FLINK-3047] - Local batch execution: set number of task manager slots to the maximum parallelism [FLINK-3052] - Optimizer does not push properties out of bulk iterations [FLINK-2966] - Improve the way job duration is reported on web frontend. [FLINK-2974] - Add periodic offset commit to Kafka Consumer if checkpointing is disabled [FLINK-3028] - Cannot cancel restarting job via web frontend [FLINK-3040] - Add docs describing how to configure State Backends [FLINK-3041] - Twitter Streaming Description section of Streaming Programming guide refers to an incorrect example &#39;TwitterLocal&#39; `}),e.add({id:271,href:"/2015/11/16/announcing-apache-flink-0.10.0/",title:"Announcing Apache Flink 0.10.0",section:"Flink Blog",content:`The Apache Flink community is pleased to announce the availability of the 0.10.0 release. 
The community put significant effort into improving and extending Apache Flink since the last release, focusing on data stream processing and operational features. About 80 contributors provided bug fixes, improvements, and new features such that in total more than 400 JIRA issues could be resolved.
 For Flink 0.10.0, the focus of the community was to graduate the DataStream API from beta and to evolve Apache Flink into a production-ready stream data processor with a competitive feature set. These efforts resulted in support for event-time and out-of-order streams, exactly-once guarantees in the case of failures, a very flexible windowing mechanism, sophisticated operator state management, and a highly-available cluster operation mode. Flink 0.10.0 also brings a new monitoring dashboard with real-time system and job monitoring capabilities. Both batch and streaming modes of Flink benefit from the new high availability and improved monitoring features. Needless to say that Flink 0.10.0 includes many more features, improvements, and bug fixes.
 We encourage everyone to download the release and check out the documentation. Feedback through the Flink mailing lists is, as always, very welcome!
 New Features # Event-time Stream Processing # Many stream processing applications consume data from sources that produce events with associated timestamps such as sensor or user-interaction events. Very often, events have to be collected from several sources such that it is usually not guaranteed that events arrive in the exact order of their timestamps at the stream processor. Consequently, stream processors must take out-of-order elements into account in order to produce results which are correct and consistent with respect to the timestamps of the events. With release 0.10.0, Apache Flink supports event-time processing as well as ingestion-time and processing-time processing. See FLINK-2674 for details.
@@ -5184,7 +5194,7 @@
 Gelly: Major Improvements and Scala API # Gelly is Flink’s API and library for processing and analyzing large-scale graphs. Gelly was introduced with release 0.9.0 and has been very well received by users and contributors. Based on user feedback, Gelly has been improved since then. In addition, Flink 0.10.0 introduces a Scala API for Gelly. See FLINK-2857 and FLINK-1962 for details.
 More Improvements and Fixes # The Flink community resolved more than 400 issues. The following list is a selection of new features and fixed bugs.
 FLINK-1851 Java Table API does not support Casting FLINK-2152 Provide zipWithIndex utility in flink-contrib FLINK-2158 NullPointerException in DateSerializer. FLINK-2240 Use BloomFilter to minimize probe side records which are spilled to disk in Hybrid-Hash-Join FLINK-2533 Gap based random sample optimization FLINK-2555 Hadoop Input/Output Formats are unable to access secured HDFS clusters FLINK-2565 Support primitive arrays as keys FLINK-2582 Document how to build Flink with other Scala versions FLINK-2584 ASM dependency is not shaded away FLINK-2689 Reusing null object for joins with SolutionSet FLINK-2703 Remove log4j classes from fat jar / document how to use Flink with logback FLINK-2763 Bug in Hybrid Hash Join: Request to spill a partition with less than two buffers. FLINK-2767 Add support Scala 2.11 to Scala shell FLINK-2774 Import Java API classes automatically in Flink&rsquo;s Scala shell FLINK-2782 Remove deprecated features for 0.10 FLINK-2800 kryo serialization problem FLINK-2834 Global round-robin for temporary directories FLINK-2842 S3FileSystem is broken FLINK-2874 Certain Avro generated getters/setters not recognized FLINK-2895 Duplicate immutable object creation FLINK-2964 MutableHashTable fails when spilling partitions without overflow segments Notice # As previously announced, Flink 0.10.0 no longer supports Java 6. If you are still using Java 6, please consider upgrading to Java 8 (Java 7 ended its free support in April 2015). Also note that some methods in the DataStream API had to be renamed as part of the API rework. For example the groupBy method has been renamed to keyBy and the windowing API changed. This migration guide will help to port your Flink 0.9 DataStream programs to the revised API of Flink 0.10.0.
-Contributors # Alexander Alexandrov Marton Balassi Enrique Bautista Faye Beligianni Bryan Bende Ajay Bhat Chris Brinkman Dmitry Buzdin Kun Cao Paris Carbone Ufuk Celebi Shivani Chandna Liang Chen Felix Cheung Hubert Czerpak Vimal Das Behrouz Derakhshan Suminda Dharmasena Stephan Ewen Fengbin Fang Gyula Fora Lun Gao Gabor Gevay Piotr Godek Sachin Goel Anton Haglund Gábor Hermann Greg Hogan Fabian Hueske Martin Junghanns Vasia Kalavri Ulf Karlsson Frederick F. Kautz Samia Khalid Johannes Kirschnick Kostas Kloudas Alexander Kolb Johann Kovacs Aljoscha Krettek Sebastian Kruse Andreas Kunft Chengxiang Li Chen Liang Andra Lungu Suneel Marthi Tamara Mendt Robert Metzger Maximilian Michels Chiwan Park Sahitya Pavurala Pietro Pinoli Ricky Pogalz Niraj Rai Lokesh Rajaram Johannes Reifferscheid Till Rohrmann Henry Saputra Matthias Sax Shiti Saxena Chesnay Schepler Peter Schrott Saumitra Shahapure Nikolaas Steenbergen Thomas Sun Peter Szabo Viktor Taranenko Kostas Tzoumas Pieter-Jan Van Aeken Theodore Vasiloudis Timo Walther Chengxuan Wang Huang Wei Dawid Wysakowicz Rerngvit Yanggratoke Nezih Yigitbasi Ted Yu Rucong Zhang Vyacheslav Zholudev Zoltán Zvara `}),e.add({id:271,href:"/2015/09/16/off-heap-memory-in-apache-flink-and-the-curious-jit-compiler/",title:"Off-heap Memory in Apache Flink and the curious JIT compiler",section:"Flink Blog",content:`Running data-intensive code in the JVM and making it well-behaved is tricky. Systems that put billions of data objects naively onto the JVM heap face unpredictable OutOfMemoryErrors and Garbage Collection stalls. Of course, you still want to to keep your data in memory as much as possible, for speed and responsiveness of the processing applications. In that context, &ldquo;off-heap&rdquo; has become almost something like a magic word to solve these problems.
+Contributors # Alexander Alexandrov Marton Balassi Enrique Bautista Faye Beligianni Bryan Bende Ajay Bhat Chris Brinkman Dmitry Buzdin Kun Cao Paris Carbone Ufuk Celebi Shivani Chandna Liang Chen Felix Cheung Hubert Czerpak Vimal Das Behrouz Derakhshan Suminda Dharmasena Stephan Ewen Fengbin Fang Gyula Fora Lun Gao Gabor Gevay Piotr Godek Sachin Goel Anton Haglund Gábor Hermann Greg Hogan Fabian Hueske Martin Junghanns Vasia Kalavri Ulf Karlsson Frederick F. Kautz Samia Khalid Johannes Kirschnick Kostas Kloudas Alexander Kolb Johann Kovacs Aljoscha Krettek Sebastian Kruse Andreas Kunft Chengxiang Li Chen Liang Andra Lungu Suneel Marthi Tamara Mendt Robert Metzger Maximilian Michels Chiwan Park Sahitya Pavurala Pietro Pinoli Ricky Pogalz Niraj Rai Lokesh Rajaram Johannes Reifferscheid Till Rohrmann Henry Saputra Matthias Sax Shiti Saxena Chesnay Schepler Peter Schrott Saumitra Shahapure Nikolaas Steenbergen Thomas Sun Peter Szabo Viktor Taranenko Kostas Tzoumas Pieter-Jan Van Aeken Theodore Vasiloudis Timo Walther Chengxuan Wang Huang Wei Dawid Wysakowicz Rerngvit Yanggratoke Nezih Yigitbasi Ted Yu Rucong Zhang Vyacheslav Zholudev Zoltán Zvara `}),e.add({id:272,href:"/2015/09/16/off-heap-memory-in-apache-flink-and-the-curious-jit-compiler/",title:"Off-heap Memory in Apache Flink and the curious JIT compiler",section:"Flink Blog",content:`Running data-intensive code in the JVM and making it well-behaved is tricky. Systems that put billions of data objects naively onto the JVM heap face unpredictable OutOfMemoryErrors and Garbage Collection stalls. Of course, you still want to to keep your data in memory as much as possible, for speed and responsiveness of the processing applications. In that context, &ldquo;off-heap&rdquo; has become almost something like a magic word to solve these problems.
 In this blog post, we will look at how Flink exploits off-heap memory. The feature is part of the upcoming release, but you can try it out with the latest nightly builds. We will also give a few interesting insights into the behavior for Java&rsquo;s JIT compiler for highly optimized methods and loops.
 Recap: Memory Management in Flink # To understand Flink’s approach to off-heap memory, we need to recap Flink’s approach to custom managed memory. We have written an earlier blog post about how Flink manages JVM memory itself
 As a summary, the core part is that Flink implements its algorithms not against Java objects, arrays, or lists, but actually against a data structure similar to java.nio.ByteBuffer. Flink uses its own specialized version, called MemorySegment on which algorithms put and get at specific positions ints, longs, byte arrays, etc, and compare and copy memory. The memory segments are held and distributed by a central component (called MemoryManager) from which algorithms request segments according to their calculated memory budgets.
@@ -5245,7 +5255,7 @@
 Segment Time HeapMemorySegment, mixed 578 msecs HybridMemorySegment, heap, mixed 580 msecs HybridMemorySegment, off-heap, mixed 576 msecs PureHeapSegment 624 msecs PureHybridSegment, heap 576 msecs PureHybridSegment, off-heap 578 msecs Reading 100000 x 8192 ints from 32768 bytes segment
 Segment Time HeapMemorySegment, mixed 464 msecs HybridMemorySegment, heap, mixed 464 msecs HybridMemorySegment, off-heap, mixed 465 msecs PureHeapSegment 463 msecs PureHybridSegment, heap 464 msecs PureHybridSegment, off-heap 463 msecs Writing 10 x 268435456 ints to 1073741824 bytes segment
 Segment Time HeapMemorySegment, mixed 2,187 msecs HybridMemorySegment, heap, mixed 2,161 msecs HybridMemorySegment, off-heap, mixed 2,152 msecs PureHeapSegment 2,770 msecs PureHybridSegment, heap 2,161 msecs PureHybridSegment, off-heap 2,157 msecs Reading 10 x 268435456 ints from 1073741824 bytes segment
-Segment Time HeapMemorySegment, mixed 1,782 msecs HybridMemorySegment, heap, mixed 1,783 msecs HybridMemorySegment, off-heap, mixed 1,774 msecs PureHeapSegment 1,501 msecs PureHybridSegment, heap 1,774 msecs PureHybridSegment, off-heap 1,771 msecs `}),e.add({id:272,href:"/2015/09/03/announcing-flink-forward-2015/",title:"Announcing Flink Forward 2015",section:"Flink Blog",content:`Flink Forward 2015 is the first conference with Flink at its center that aims to bring together the Apache Flink community in a single place. The organizers are starting this conference in October 12 and 13 from Berlin, the place where Apache Flink started.
+Segment Time HeapMemorySegment, mixed 1,782 msecs HybridMemorySegment, heap, mixed 1,783 msecs HybridMemorySegment, off-heap, mixed 1,774 msecs PureHeapSegment 1,501 msecs PureHybridSegment, heap 1,774 msecs PureHybridSegment, off-heap 1,771 msecs `}),e.add({id:273,href:"/2015/09/03/announcing-flink-forward-2015/",title:"Announcing Flink Forward 2015",section:"Flink Blog",content:`Flink Forward 2015 is the first conference with Flink at its center that aims to bring together the Apache Flink community in a single place. The organizers are starting this conference in October 12 and 13 from Berlin, the place where Apache Flink started.
 The conference program has been announced by the organizers and a program committee consisting of Flink PMC members. The agenda contains talks from industry and academia as well as a dedicated session on hands-on Flink training.
 Some highlights of the talks include
 A keynote by William Vambenepe, lead of the product management team responsible for Big Data services on Google Cloud Platform (BigQuery, Dataflow, etc&hellip;) on data streaming, Google Cloud Dataflow, and Apache Flink.
@@ -5253,11 +5263,11 @@
 Talks on how open source projects, including Apache Mahout, Apache SAMOA (incubating), Apache Zeppelin (incubating), Apache BigTop, and Apache Storm integrate with Apache Flink.
 Talks by Flink committers on several aspects of the system, such as fault tolerance, the internal runtime architecture, and others.
 Check out the schedule and register for the conference.
-`}),e.add({id:273,href:"/2015/09/01/apache-flink-0.9.1-available/",title:"Apache Flink 0.9.1 available",section:"Flink Blog",content:`The Flink community is happy to announce that Flink 0.9.1 is now available.
+`}),e.add({id:274,href:"/2015/09/01/apache-flink-0.9.1-available/",title:"Apache Flink 0.9.1 available",section:"Flink Blog",content:`The Flink community is happy to announce that Flink 0.9.1 is now available.
 0.9.1 is a maintenance release, which includes a lot of minor fixes across several parts of the system. We suggest all users of Flink to work with this latest stable version.
 Download the release and [check out the documentation]({{ site.docs-stable }}). Feedback through the Flink mailing lists is, as always, very welcome!
 The following issues were fixed for this release:
-FLINK-1916 EOFException when running delta-iteration job FLINK-2089 &ldquo;Buffer recycled&rdquo; IllegalStateException during cancelling FLINK-2189 NullPointerException in MutableHashTable FLINK-2205 Confusing entries in JM Webfrontend Job Configuration section FLINK-2229 Data sets involving non-primitive arrays cannot be unioned FLINK-2238 Scala ExecutionEnvironment.fromCollection does not work with Sets FLINK-2248 Allow disabling of sdtout logging output FLINK-2257 Open and close of RichWindowFunctions is not called FLINK-2262 ParameterTool API misnamed function FLINK-2280 GenericTypeComparator.compare() does not respect ascending flag FLINK-2285 Active policy emits elements of the last window twice FLINK-2286 Window ParallelMerge sometimes swallows elements of the last window FLINK-2293 Division by Zero Exception FLINK-2298 Allow setting custom YARN application names through the CLI FLINK-2347 Rendering problem with Documentation website FLINK-2353 Hadoop mapred IOFormat wrappers do not respect JobConfigurable interface FLINK-2356 Resource leak in checkpoint coordinator FLINK-2361 CompactingHashTable loses entries FLINK-2362 distinct is missing in DataSet API documentation FLINK-2381 Possible class not found Exception on failed partition producer FLINK-2384 Deadlock during partition spilling FLINK-2386 Implement Kafka connector using the new Kafka Consumer API FLINK-2394 HadoopOutFormat OutputCommitter is default to FileOutputCommiter FLINK-2412 Race leading to IndexOutOfBoundsException when querying for buffer while releasing SpillablePartition FLINK-2422 Web client is showing a blank page if &ldquo;Meta refresh&rdquo; is disabled in browser FLINK-2424 InstantiationUtil.serializeObject(Object) does not close output stream FLINK-2437 TypeExtractor.analyzePojo has some problems around the default constructor detection FLINK-2442 PojoType fields not supported by field position keys FLINK-2447 TypeExtractor returns wrong type info when a Tuple has two fields of 
the same POJO type FLINK-2450 IndexOutOfBoundsException in KryoSerializer FLINK-2460 ReduceOnNeighborsWithExceptionITCase failure FLINK-2527 If a VertexUpdateFunction calls setNewVertexValue more than once, the MessagingFunction will only see the first value set FLINK-2540 LocalBufferPool.requestBuffer gets into infinite loop FLINK-2542 It should be documented that it is required from a join key to override hashCode(), when it is not a POJO FLINK-2555 Hadoop Input/Output Formats are unable to access secured HDFS clusters FLINK-2560 Flink-Avro Plugin cannot be handled by Eclipse FLINK-2572 Resolve base path of symlinked executable FLINK-2584 ASM dependency is not shaded away `}),e.add({id:274,href:"/2015/08/24/introducing-gelly-graph-processing-with-apache-flink/",title:"Introducing Gelly: Graph Processing with Apache Flink",section:"Flink Blog",content:`This blog post introduces Gelly, Apache Flink&rsquo;s graph-processing API and library. Flink&rsquo;s native support for iterations makes it a suitable platform for large-scale graph analytics. By leveraging delta iterations, Gelly is able to map various graph processing models such as vertex-centric or gather-sum-apply to Flink dataflows.
+FLINK-1916 EOFException when running delta-iteration job FLINK-2089 &ldquo;Buffer recycled&rdquo; IllegalStateException during cancelling FLINK-2189 NullPointerException in MutableHashTable FLINK-2205 Confusing entries in JM Webfrontend Job Configuration section FLINK-2229 Data sets involving non-primitive arrays cannot be unioned FLINK-2238 Scala ExecutionEnvironment.fromCollection does not work with Sets FLINK-2248 Allow disabling of sdtout logging output FLINK-2257 Open and close of RichWindowFunctions is not called FLINK-2262 ParameterTool API misnamed function FLINK-2280 GenericTypeComparator.compare() does not respect ascending flag FLINK-2285 Active policy emits elements of the last window twice FLINK-2286 Window ParallelMerge sometimes swallows elements of the last window FLINK-2293 Division by Zero Exception FLINK-2298 Allow setting custom YARN application names through the CLI FLINK-2347 Rendering problem with Documentation website FLINK-2353 Hadoop mapred IOFormat wrappers do not respect JobConfigurable interface FLINK-2356 Resource leak in checkpoint coordinator FLINK-2361 CompactingHashTable loses entries FLINK-2362 distinct is missing in DataSet API documentation FLINK-2381 Possible class not found Exception on failed partition producer FLINK-2384 Deadlock during partition spilling FLINK-2386 Implement Kafka connector using the new Kafka Consumer API FLINK-2394 HadoopOutFormat OutputCommitter is default to FileOutputCommiter FLINK-2412 Race leading to IndexOutOfBoundsException when querying for buffer while releasing SpillablePartition FLINK-2422 Web client is showing a blank page if &ldquo;Meta refresh&rdquo; is disabled in browser FLINK-2424 InstantiationUtil.serializeObject(Object) does not close output stream FLINK-2437 TypeExtractor.analyzePojo has some problems around the default constructor detection FLINK-2442 PojoType fields not supported by field position keys FLINK-2447 TypeExtractor returns wrong type info when a Tuple has two fields of 
the same POJO type FLINK-2450 IndexOutOfBoundsException in KryoSerializer FLINK-2460 ReduceOnNeighborsWithExceptionITCase failure FLINK-2527 If a VertexUpdateFunction calls setNewVertexValue more than once, the MessagingFunction will only see the first value set FLINK-2540 LocalBufferPool.requestBuffer gets into infinite loop FLINK-2542 It should be documented that it is required from a join key to override hashCode(), when it is not a POJO FLINK-2555 Hadoop Input/Output Formats are unable to access secured HDFS clusters FLINK-2560 Flink-Avro Plugin cannot be handled by Eclipse FLINK-2572 Resolve base path of symlinked executable FLINK-2584 ASM dependency is not shaded away `}),e.add({id:275,href:"/2015/08/24/introducing-gelly-graph-processing-with-apache-flink/",title:"Introducing Gelly: Graph Processing with Apache Flink",section:"Flink Blog",content:`This blog post introduces Gelly, Apache Flink&rsquo;s graph-processing API and library. Flink&rsquo;s native support for iterations makes it a suitable platform for large-scale graph analytics. By leveraging delta iterations, Gelly is able to map various graph processing models such as vertex-centric or gather-sum-apply to Flink dataflows.
 Gelly allows Flink users to perform end-to-end data analysis in a single system. Gelly can be seamlessly used with Flink&rsquo;s DataSet API, which means that pre-processing, graph creation, analysis, and post-processing can be done in the same application. At the end of this post, we will go through a step-by-step example in order to demonstrate that loading, transformation, filtering, graph creation, and analysis can be performed in a single Flink program.
 Overview
 What is Gelly? Graph Representation and Creation Transformations and Utilities Iterative Graph Processing Library of Graph Algorithms Use-Case: Music Profiles Ongoing and Future Work What is Gelly? # Gelly is a Graph API for Flink. It is currently supported in both Java and Scala. The Scala methods are implemented as wrappers on top of the basic Java operations. The API contains a set of utility functions for graph analysis, supports iterative graph processing and introduces a library of graph algorithms.
@@ -5318,7 +5328,7 @@
 Curious? Read more about our plans for Gelly in the roadmap.
 Back to top
 Links # Gelly Documentation
-`}),e.add({id:275,href:"/2015/06/24/announcing-apache-flink-0.9.0/",title:"Announcing Apache Flink 0.9.0",section:"Flink Blog",content:`The Apache Flink community is pleased to announce the availability of the 0.9.0 release. The release is the result of many months of hard work within the Flink community. It contains many new features and improvements which were previewed in the 0.9.0-milestone1 release and have been polished since then. This is the largest Flink release so far.
+`}),e.add({id:276,href:"/2015/06/24/announcing-apache-flink-0.9.0/",title:"Announcing Apache Flink 0.9.0",section:"Flink Blog",content:`The Apache Flink community is pleased to announce the availability of the 0.9.0 release. The release is the result of many months of hard work within the Flink community. It contains many new features and improvements which were previewed in the 0.9.0-milestone1 release and have been polished since then. This is the largest Flink release so far.
 Download the release and check out the documentation. Feedback through the Flink mailing lists is, as always, very welcome!
 New Features # Exactly-once Fault Tolerance for streaming programs # This release introduces a new fault tolerance mechanism for streaming dataflows. The new checkpointing algorithm takes data sources and also user-defined state into account and recovers failures such that all records are reflected exactly once in the operator states.
 The checkpointing algorithm is lightweight and driven by barriers that are periodically injected into the data streams at the sources. As such, it has an extremely low coordination overhead and is able to sustain very high throughput rates. User-defined state can be automatically backed up to configurable storage by the fault tolerance mechanism.
@@ -5395,14 +5405,14 @@
 FLINK-1781: Quickstarts broken due to Scala Version Variables
 Notice # The 0.9 series of Flink is the last version to support Java 6. If you are still using Java 6, please consider upgrading to Java 8 (Java 7 ended its free support in April 2015).
 Flink will require at least Java 7 in major releases after 0.9.0.
-`}),e.add({id:276,href:"/2015/05/14/april-2015-in-the-flink-community/",title:"April 2015 in the Flink community",section:"Flink Blog",content:`April was an packed month for Apache Flink.
+`}),e.add({id:277,href:"/2015/05/14/april-2015-in-the-flink-community/",title:"April 2015 in the Flink community",section:"Flink Blog",content:`April was an packed month for Apache Flink.
 Flink runner for Google Cloud Dataflow # A Flink runner for Google Cloud Dataflow was announced. See the blog posts by data Artisans and the Google Cloud Platform Blog. Google Cloud Dataflow programs can be written using and open-source SDK and run in multiple backends, either as a managed service inside Google&rsquo;s infrastructure, or leveraging open source runners, including Apache Flink.
 Flink 0.9.0-milestone1 release # The highlight of April was of course the availability of Flink 0.9-milestone1. This was a release packed with new features, including, a Python DataSet API, the new SQL-like Table API, FlinkML, a machine learning library on Flink, Gelly, FLink&rsquo;s Graph API, as well as a mode to run Flink on YARN leveraging Tez. In case you missed it, check out the release announcement blog post for details
 Conferences and meetups # April kicked off the conference season. Apache Flink was presented at ApacheCon in Texas (slides), the Hadoop Summit in Brussels featured two talks on Flink (see slides here and here), as well as at the Hadoop User Groups of the Netherlands (slides) and Stockholm. The brand new Apache Flink meetup Stockholm was also established.
 Google Summer of Code # Three students will work on Flink during Google&rsquo;s Summer of Code program on distributed pattern matching, exact and approximate statistics for data streams and windows, as well as asynchronous iterations and updates.
 Flink on the web # Fabian Hueske gave an interview at InfoQ on Apache Flink.
 Upcoming events # Stay tuned for a wealth of upcoming events! Two Flink talsk will be presented at Berlin Buzzwords, Flink will be presented at the Hadoop Summit in San Jose. A training workshop on Apache Flink is being organized in Berlin. Finally, Flink Forward, the first conference to bring together the whole Flink community is taking place in Berlin in October 2015.
-`}),e.add({id:277,href:"/2015/05/11/juggling-with-bits-and-bytes/",title:"Juggling with Bits and Bytes",section:"Flink Blog",content:` How Apache Flink operates on binary data # Nowadays, a lot of open-source systems for analyzing large data sets are implemented in Java or other JVM-based programming languages. The most well-known example is Apache Hadoop, but also newer frameworks such as Apache Spark, Apache Drill, and also Apache Flink run on JVMs. A common challenge that JVM-based data analysis engines face is to store large amounts of data in memory - both for caching and for efficient processing such as sorting and joining of data. Managing the JVM memory well makes the difference between a system that is hard to configure and has unpredictable reliability and performance and a system that behaves robustly with few configuration knobs.
+`}),e.add({id:278,href:"/2015/05/11/juggling-with-bits-and-bytes/",title:"Juggling with Bits and Bytes",section:"Flink Blog",content:` How Apache Flink operates on binary data # Nowadays, a lot of open-source systems for analyzing large data sets are implemented in Java or other JVM-based programming languages. The most well-known example is Apache Hadoop, but also newer frameworks such as Apache Spark, Apache Drill, and also Apache Flink run on JVMs. A common challenge that JVM-based data analysis engines face is to store large amounts of data in memory - both for caching and for efficient processing such as sorting and joining of data. Managing the JVM memory well makes the difference between a system that is hard to configure and has unpredictable reliability and performance and a system that behaves robustly with few configuration knobs.
 In this blog post we discuss how Apache Flink manages memory, talk about its custom data de/serialization stack, and show how it operates on binary data.
 Data Objects? Let’s put them on the heap! # The most straight-forward approach to process lots of data in a JVM is to put it as objects on the heap and operate on these objects. Caching a data set as objects would be as simple as maintaining a list containing an object for each record. An in-memory sort would simply sort the list of objects. However, this approach has a few notable drawbacks. First of all it is not trivial to watch and control heap memory usage when a lot of objects are created and invalidated constantly. Memory overallocation instantly kills the JVM with an OutOfMemoryError. Another aspect is garbage collection on multi-GB JVMs which are flooded with new objects. The overhead of garbage collection in such environments can easily reach 50% and more. Finally, Java objects come with a certain space overhead depending on the JVM and platform. For data sets with many small objects this can significantly reduce the effectively usable amount of memory. Given proficient system design and careful, use-case specific system parameter tuning, heap memory usage can be more or less controlled and OutOfMemoryErrors avoided. However, such setups are rather fragile especially if data characteristics or the execution environment change.
 What is Flink doing about that? # Apache Flink has its roots at a research project which aimed to combine the best technologies of MapReduce-based systems and parallel database systems. Coming from this background, Flink has always had its own way of processing data in-memory. Instead of putting lots of objects on the heap, Flink serializes objects into a fixed number of pre-allocated memory segments. Its DBMS-style sort and join algorithms operate as much as possible on this binary data to keep the de/serialization overhead at a minimum. If more data needs to be processed than can be kept in memory, Flink’s operators partially spill data to disk. In fact, a lot of Flink’s internal implementations look more like C/C++ rather than common Java. The following figure gives a high-level overview of how Flink stores data serialized in memory segments and spills to disk if necessary.
@@ -5430,7 +5440,7 @@
 Occupied Memory Object-on-Heap Flink-Serialized Kryo-Serialized Sort on Integer approx. 700 MB (heap) 277 MB (sort buffer) 266 MB (sort buffer) Sort on String approx. 700 MB (heap) 315 MB (sort buffer) 266 MB (sort buffer) To summarize, the experiments verify the previously stated benefits of operating on binary data.
 We’re not done yet! # Apache Flink features quite a bit of advanced techniques to safely and efficiently process huge amounts of data with limited memory resources. However, there are a few points that could make Flink even more efficient. The Flink community is working on moving the managed memory to off-heap memory. This will allow for smaller JVMs, lower garbage collection overhead, and also easier system configuration. With Flink’s Table API, the semantics of all operations such as aggregations and projections are known (in contrast to black-box user-defined functions). Hence we can generate code for Table API operations that directly operates on binary data. Further improvements include serialization layouts which are tailored towards the operations that are applied on the binary data and code generation for serializers and comparators.
 The groundwork (and a lot more) for operating on binary data is done but there is still some room for making Flink even better and faster. If you are crazy about performance and like to juggle with lot of bits and bytes, join the Flink community!
-TL;DR; Give me three things to remember! # Flink’s active memory management avoids nasty OutOfMemoryErrors that kill your JVMs and reduces garbage collection overhead. Flink features a highly efficient data de/serialization stack that facilitates operations on binary data and makes more data fit into memory. Flink’s DBMS-style operators operate natively on binary data yielding high performance in-memory and destage gracefully to disk if necessary. `}),e.add({id:278,href:"/2015/04/13/announcing-flink-0.9.0-milestone1-preview-release/",title:"Announcing Flink 0.9.0-milestone1 preview release",section:"Flink Blog",content:`The Apache Flink community is pleased to announce the availability of the 0.9.0-milestone-1 release. The release is a preview of the upcoming 0.9.0 release. It contains many new features which will be available in the upcoming 0.9 release. Interested users are encouraged to try it out and give feedback. As the version number indicates, this release is a preview release that contains known issues.
+TL;DR; Give me three things to remember! # Flink’s active memory management avoids nasty OutOfMemoryErrors that kill your JVMs and reduces garbage collection overhead. Flink features a highly efficient data de/serialization stack that facilitates operations on binary data and makes more data fit into memory. Flink’s DBMS-style operators operate natively on binary data yielding high performance in-memory and destage gracefully to disk if necessary. `}),e.add({id:279,href:"/2015/04/13/announcing-flink-0.9.0-milestone1-preview-release/",title:"Announcing Flink 0.9.0-milestone1 preview release",section:"Flink Blog",content:`The Apache Flink community is pleased to announce the availability of the 0.9.0-milestone-1 release. The release is a preview of the upcoming 0.9.0 release. It contains many new features which will be available in the upcoming 0.9 release. Interested users are encouraged to try it out and give feedback. As the version number indicates, this release is a preview release that contains known issues.
 You can download the release here and check out the latest documentation here. Feedback through the Flink mailing lists is, as always, very welcome!
 New Features # Table API # Flink’s new Table API offers a higher-level abstraction for interacting with structured data sources. The Table API allows users to execute logical, SQL-like queries on distributed data sets while allowing them to freely mix declarative queries with regular Flink operators. Here is an example that groups and joins two tables:
 val clickCounts = clicks .groupBy(&#39;user).select(&#39;userId, &#39;url.count as &#39;count) val activeUsers = users.join(clickCounts) .where(&#39;id === &#39;userId &amp;&amp; &#39;count &gt; 10).select(&#39;username, &#39;count, ...) Tables consist of logical attributes that can be selected by name rather than physical Java and Scala data types. This alleviates a lot of boilerplate code for common ETL tasks and raises the abstraction for Flink programs. Tables are available for both static and streaming data sources (DataSet and DataStream APIs).
@@ -5469,7 +5479,7 @@
 FLINK-1105: Add support for locally sorted output
 FLINK-1688: Add socket sink
 FLINK-1436: Improve usability of command line interface
-`}),e.add({id:279,href:"/2015/04/07/march-2015-in-the-flink-community/",title:"March 2015 in the Flink community",section:"Flink Blog",content:`March has been a busy month in the Flink community.
+`}),e.add({id:280,href:"/2015/04/07/march-2015-in-the-flink-community/",title:"March 2015 in the Flink community",section:"Flink Blog",content:`March has been a busy month in the Flink community.
 Scaling ALS # Flink committers employed at data Artisans published a blog post on how they scaled matrix factorization with Flink and Google Compute Engine to matrices with 28 billion elements.
 Learn about the internals of Flink # The community has started an effort to better document the internals of Flink. Check out the first articles on the Flink wiki on how Flink manages memory, how tasks in Flink exchange data, type extraction and serialization in Flink, as well as how Flink builds on Akka for distributed coordination.
 Check out also the new blog post on how Flink executes joins with several insights into Flink&rsquo;s runtime.
@@ -5477,7 +5487,7 @@
 In the Flink master # Table API in Scala and Java # The new Table API in Flink is now available in both Java and Scala. Check out the examples here (Java) and here (Scala).
 Additions to the Machine Learning library # Flink&rsquo;s Machine Learning library is seeing quite a bit of traction. Recent additions include the CoCoA algorithm for distributed optimization.
 Exactly-once delivery guarantees for streaming jobs # Flink streaming jobs now provide exactly once processing guarantees when coupled with persistent sources (notably Apache Kafka). Flink periodically checkpoints and persists the offsets of the sources and restarts from those checkpoints at failure recovery. This functionality is currently limited in that it does not yet handle large state and iterative programs.
-`}),e.add({id:280,href:"/2015/03/13/peeking-into-apache-flinks-engine-room/",title:"Peeking into Apache Flink's Engine Room",section:"Flink Blog",content:` Join Processing in Apache Flink # Joins are prevalent operations in many data processing applications. Most data processing systems feature APIs that make joining data sets very easy. However, the internal algorithms for join processing are much more involved – especially if large data sets need to be efficiently handled. Therefore, join processing serves as a good example to discuss the salient design points and implementation details of a data processing system.
+`}),e.add({id:281,href:"/2015/03/13/peeking-into-apache-flinks-engine-room/",title:"Peeking into Apache Flink's Engine Room",section:"Flink Blog",content:` Join Processing in Apache Flink # Joins are prevalent operations in many data processing applications. Most data processing systems feature APIs that make joining data sets very easy. However, the internal algorithms for join processing are much more involved – especially if large data sets need to be efficiently handled. Therefore, join processing serves as a good example to discuss the salient design points and implementation details of a data processing system.
 In this blog post, we cut through Apache Flink’s layered architecture and take a look at its internals with a focus on how it handles joins. Specifically, I will
 show how easy it is to join data sets using Flink’s fluent APIs, discuss basic distributed join strategies, Flink’s join implementations, and its memory management, talk about Flink’s optimizer that automatically chooses join strategies, show some performance numbers for joining data sets of different sizes, and finally briefly discuss joining of co-located and pre-sorted data sets. Disclaimer: This blog post is exclusively about equi-joins. Whenever I say “join” in the following, I actually mean “equi-join”.
 How do I join with Flink? # Flink provides fluent APIs in Java and Scala to write data flow programs. Flink’s APIs are centered around parallel data collections which are called data sets. data sets are processed by applying Transformations that compute new data sets. Flink’s transformations include Map and Reduce as known from MapReduce [1] but also operators for joining, co-grouping, and iterative processing. The documentation gives an overview of all available transformations [2].
@@ -5508,7 +5518,7 @@
 As in the single-core benchmark, we run 1:N joins, generate the data on-the-fly, and immediately discard the result after the join. We run the benchmark on 10 n1-highmem-8 Google Compute Engine instances. Each instance is equipped with 8 cores, 52GB RAM, 40GB of which are configured as working memory (5GB per core), and one local SSD for spilling to disk. All benchmarks are performed using the same configuration, i.e., no fine tuning for the respective data sizes is done. The programs are executed with a parallelism of 80.
 As expected, the Broadcast-Forward strategy performs best for very small inputs because the large probe side is not shipped over the network and is locally joined. However, when the size of the broadcasted side grows, two problems arise. First the amount of data which is shipped increases but also each parallel instance has to process the full broadcasted data set. The performance of both Repartitioning strategies behaves similar for growing input sizes which indicates that these strategies are mainly limited by the cost of the data transfer (at max 2TB are shipped over the network and joined). Although the Sort-Merge-Join strategy shows the worst performance all shown cases, it has a right to exist because it can nicely exploit sorted input data.
 I’ve got sooo much data to join, do I really need to ship it? # We have seen that off-the-shelf distributed joins work really well in Flink. But what if your data is so huge that you do not want to shuffle it across your cluster? We recently added some features to Flink for specifying semantic properties (partitioning and sorting) on input splits and co-located reading of local input files. With these tools at hand, it is possible to join pre-partitioned data sets from your local filesystem without sending a single byte over your cluster’s network. If the input data is even pre-sorted, the join can be done as a Sort-Merge-Join without sorting, i.e., the join is essentially done on-the-fly. Exploiting co-location requires a very special setup though. Data needs to be stored on the local filesystem because HDFS does not feature data co-location and might move file blocks across data nodes. That means you need to take care of many things yourself which HDFS would have done for you, including replication to avoid data loss. On the other hand, performance gains of joining co-located and pre-sorted can be quite substantial.
-tl;dr: What should I remember from all of this? # Flink’s fluent Scala and Java APIs make joins and other data transformations easy as cake. The optimizer does the hard choices for you, but gives you control in case you know better. Flink’s join implementations perform very good in-memory and gracefully degrade when going to disk. Due to Flink’s robust memory management, there is no need for job- or data-specific memory tuning to avoid a nasty OutOfMemoryException. It just runs out-of-the-box. References # [1] “MapReduce: Simplified data processing on large clusters”, Dean, Ghemawat, 2004 [2] Flink 0.8.1 documentation: Data Transformations [3] Flink 0.8.1 documentation: Joins [4] Flink 1.0 documentation: Semantic annotations [5] Flink 1.0 documentation: Optimizer join hints `}),e.add({id:281,href:"/2015/03/02/february-2015-in-the-flink-community/",title:"February 2015 in the Flink community",section:"Flink Blog",content:`February might be the shortest month of the year, but this does not mean that the Flink community has not been busy adding features to the system and fixing bugs. Here’s a rundown of the activity in the Flink community last month.
+tl;dr: What should I remember from all of this? # Flink’s fluent Scala and Java APIs make joins and other data transformations easy as cake. The optimizer does the hard choices for you, but gives you control in case you know better. Flink’s join implementations perform very good in-memory and gracefully degrade when going to disk. Due to Flink’s robust memory management, there is no need for job- or data-specific memory tuning to avoid a nasty OutOfMemoryException. It just runs out-of-the-box. References # [1] “MapReduce: Simplified data processing on large clusters”, Dean, Ghemawat, 2004 [2] Flink 0.8.1 documentation: Data Transformations [3] Flink 0.8.1 documentation: Joins [4] Flink 1.0 documentation: Semantic annotations [5] Flink 1.0 documentation: Optimizer join hints `}),e.add({id:282,href:"/2015/03/02/february-2015-in-the-flink-community/",title:"February 2015 in the Flink community",section:"Flink Blog",content:`February might be the shortest month of the year, but this does not mean that the Flink community has not been busy adding features to the system and fixing bugs. Here’s a rundown of the activity in the Flink community last month.
 0.8.1 release # Flink 0.8.1 was released. This bugfixing release resolves a total of 22 issues.
 New committer # Max Michels has been voted a committer by the Flink PMC.
 Flink adapter for Apache SAMOA # Apache SAMOA (incubating) is a distributed streaming machine learning (ML) framework with a programming abstraction for distributed streaming ML algorithms. SAMOA runs on a variety of backend engines, currently Apache Storm and Apache S4. A pull request is available at the SAMOA repository that adds a Flink adapter for SAMOA.
@@ -5524,7 +5534,7 @@
 Flink Expressions # The newly merged flink-table module is the first step in Flink’s roadmap towards logical queries and SQL support. Here’s a preview on how you can read two CSV file, assign a logical schema to, and apply transformations like filters and joins using logical attributes rather than physical data types.
 val customers = getCustomerDataSet(env) .as(&#39;id, &#39;mktSegment) .filter( &#39;mktSegment === &#34;AUTOMOBILE&#34; ) val orders = getOrdersDataSet(env) .filter( o =&gt; dateFormat.parse(o.orderDate).before(date) ) .as(&#39;orderId, &#39;custId, &#39;orderDate, &#39;shipPrio) val items = orders.join(customers) .where(&#39;custId === &#39;id) .select(&#39;orderId, &#39;orderDate, &#39;shipPrio) Access to HCatalog tables # With the flink-hcatalog module, you can now conveniently access HCatalog/Hive tables. The module supports projection (selection and order of fields) and partition filters.
 Access to secured YARN clusters/HDFS. # With this change users can access Kerberos secured YARN (and HDFS) Hadoop clusters. Also, basic support for accessing secured HDFS with a standalone Flink setup is now available.
-`}),e.add({id:282,href:"/2015/02/09/introducing-flink-streaming/",title:"Introducing Flink Streaming",section:"Flink Blog",content:`This post is the first of a series of blog posts on Flink Streaming, the recent addition to Apache Flink that makes it possible to analyze continuous data sources in addition to static files. Flink Streaming uses the pipelined Flink engine to process data streams in real time and offers a new API including definition of flexible windows.
+`}),e.add({id:283,href:"/2015/02/09/introducing-flink-streaming/",title:"Introducing Flink Streaming",section:"Flink Blog",content:`This post is the first of a series of blog posts on Flink Streaming, the recent addition to Apache Flink that makes it possible to analyze continuous data sources in addition to static files. Flink Streaming uses the pipelined Flink engine to process data streams in real time and offers a new API including definition of flexible windows.
 In this post, we go through an example that uses the Flink Streaming API to compute statistics on stock market data that arrive continuously and combine the stock market data with Twitter streams. See the Streaming Programming Guide for a detailed presentation of the Streaming API.
 First, we read a bunch of stock price streams and combine them into one stream of market data. We apply several transformations on this market data stream, like rolling aggregations per stock. Then we emit price warning alerts when the prices are rapidly changing. Moving towards more advanced features, we compute rolling correlations between the market data streams and a Twitter stream with stock mentions.
 For running the example implementation please use the 0.9-SNAPSHOT version of Flink as a dependency. The full example code base can be found here in Scala and here in Java7.
@@ -5548,7 +5558,7 @@
 Upcoming for streaming # There are some aspects of Flink Streaming that are subjects to change by the next release making this application look even nicer.
 Stay tuned for later blog posts on how Flink Streaming works internally, fault tolerance, and performance measurements!
 Back to top
-`}),e.add({id:283,href:"/2015/02/04/january-2015-in-the-flink-community/",title:"January 2015 in the Flink community",section:"Flink Blog",content:`Happy 2015! Here is a (hopefully digestible) summary of what happened last month in the Flink community.
+`}),e.add({id:284,href:"/2015/02/04/january-2015-in-the-flink-community/",title:"January 2015 in the Flink community",section:"Flink Blog",content:`Happy 2015! Here is a (hopefully digestible) summary of what happened last month in the Flink community.
 0.8.0 release # Flink 0.8.0 was released. See here for the release notes.
 Flink roadmap # The community has published a roadmap for 2015 on the Flink wiki. Check it out to see what is coming up in Flink, and pick up an issue to contribute!
 Articles in the press # The Apache Software Foundation announced Flink as a Top-Level Project. The announcement was picked up by the media, e.g., here, here, and here.
@@ -5559,7 +5569,7 @@
 Gelly, Flink’s Graph API # This pull request introduces Gelly, Flink’s brand new Graph API. Gelly offers a native graph programming abstraction with functionality for vertex-centric programming, as well as available graph algorithms. See this slide set for an overview of Gelly.
 Semantic annotations # Semantic annotations are a powerful mechanism to expose information about the behavior of Flink functions to Flink’s optimizer. The optimizer can leverage this information to generate more efficient execution plans. For example the output of a Reduce operator that groups on the second field of a tuple is still partitioned on that field if the Reduce function does not modify the value of the second field. By exposing this information to the optimizer, the optimizer can generate plans that avoid expensive data shuffling and reuse the partitioned output of Reduce. Semantic annotations can be defined for most data types, including (nested) tuples and POJOs. See the snapshot documentation for details (not online yet).
 New YARN client # The improved YARN client of Flink now allows users to deploy Flink on YARN for executing a single job. Older versions only supported a long-running YARN session. The code of the YARN client has been refactored to provide an (internal) Java API for controlling YARN clusters more easily.
-`}),e.add({id:284,href:"/2015/01/21/apache-flink-0.8.0-available/",title:"Apache Flink 0.8.0 available",section:"Flink Blog",content:`We are pleased to announce the availability of Flink 0.8.0. This release includes new user-facing features as well as performance and bug fixes, extends the support for filesystems and introduces the Scala API and flexible windowing semantics for Flink Streaming. A total of 33 people have contributed to this release, a big thanks to all of them!
+`}),e.add({id:285,href:"/2015/01/21/apache-flink-0.8.0-available/",title:"Apache Flink 0.8.0 available",section:"Flink Blog",content:`We are pleased to announce the availability of Flink 0.8.0. This release includes new user-facing features as well as performance and bug fixes, extends the support for filesystems and introduces the Scala API and flexible windowing semantics for Flink Streaming. A total of 33 people have contributed to this release, a big thanks to all of them!
 Download Flink 0.8.0
 See the release changelog
 Overview of major new features # Extended filesystem support: The former DistributedFileSystem interface has been generalized to HadoopFileSystem now supporting all sub classes of org.apache.hadoop.fs.FileSystem. This allows users to use all file systems supported by Hadoop with Apache Flink. See connecting to other systems
@@ -5570,7 +5580,7 @@
 Improved input split assignment which maximizes computation locality Smart broadcasting mechanism which minimizes network I/O Custom partitioners which let the user control how the data is partitioned within the cluster. This helps to prevent data skewness and allows to implement highly efficient algorithms. coGroup operator now supports group sorting for its inputs Kryo is the new fallback serializer: Apache Flink has a sophisticated type analysis and serialization framework that is able to handle commonly used types very efficiently. In addition to that, there is a fallback serializer for types which are not supported. Older versions of Flink used the reflective Avro serializer for that purpose. With this release, Flink is using the powerful Kryo and twitter-chill library for support of types such as Java Collections and Scala specifc types.
 Hadoop 2.2.0+ is now the default Hadoop dependency: With Flink 0.8.0 we made the “hadoop2” build profile the default build for Flink. This means that all users using Hadoop 1 (0.2X or 1.2.X versions) have to specify version “0.8.0-hadoop1” in their pom files.
 HBase module updated The HBase version has been updated to 0.98.6.1. Also, Hbase is now available to the Hadoop1 and Hadoop2 profile of Flink.
-Contributors # Marton Balassi Daniel Bali Carsten Brandt Moritz Borgmann Stefan Bunk Paris Carbone Ufuk Celebi Nils Engelbach Stephan Ewen Gyula Fora Gabor Hermann Fabian Hueske Vasiliki Kalavri Johannes Kirschnick Aljoscha Krettek Suneel Marthi Robert Metzger Felix Neutatz Chiwan Park Flavio Pompermaier Mingliang Qi Shiva Teja Reddy Till Rohrmann Henry Saputra Kousuke Saruta Chesney Schepler Erich Schubert Peter Szabo Jonas Traub Kostas Tzoumas Timo Walther Daniel Warneke Chen Xu `}),e.add({id:285,href:"/2015/01/06/december-2014-in-the-flink-community/",title:"December 2014 in the Flink community",section:"Flink Blog",content:`This is the first blog post of a “newsletter” like series where we give a summary of the monthly activity in the Flink community. As the Flink project grows, this can serve as a &ldquo;tl;dr&rdquo; for people that are not following the Flink dev and user mailing lists, or those that are simply overwhelmed by the traffic.
+Contributors # Marton Balassi Daniel Bali Carsten Brandt Moritz Borgmann Stefan Bunk Paris Carbone Ufuk Celebi Nils Engelbach Stephan Ewen Gyula Fora Gabor Hermann Fabian Hueske Vasiliki Kalavri Johannes Kirschnick Aljoscha Krettek Suneel Marthi Robert Metzger Felix Neutatz Chiwan Park Flavio Pompermaier Mingliang Qi Shiva Teja Reddy Till Rohrmann Henry Saputra Kousuke Saruta Chesney Schepler Erich Schubert Peter Szabo Jonas Traub Kostas Tzoumas Timo Walther Daniel Warneke Chen Xu `}),e.add({id:286,href:"/2015/01/06/december-2014-in-the-flink-community/",title:"December 2014 in the Flink community",section:"Flink Blog",content:`This is the first blog post of a “newsletter” like series where we give a summary of the monthly activity in the Flink community. As the Flink project grows, this can serve as a &ldquo;tl;dr&rdquo; for people that are not following the Flink dev and user mailing lists, or those that are simply overwhelmed by the traffic.
 Flink graduation # The biggest news is that the Apache board approved Flink as a top-level Apache project! The Flink team is working closely with the Apache press team for an official announcement, so stay tuned for details!
 New Flink website # The Flink website got a total make-over, both in terms of appearance and content.
 Flink IRC channel # A new IRC channel called #flink was created at irc.freenode.org. An easy way to access the IRC channel is through the web client. Feel free to stop by to ask anything or share your ideas about Apache Flink!
@@ -5584,7 +5594,7 @@
 Kryo Serialization as the new default fallback # Flink’s build-in type serialization framework is handles all common types very efficiently. Prior versions uses Avro to serialize types that the built-in framework could not handle. Flink serialization system improved a lot over time and by now surpasses the capabilities of Avro in many cases. Kryo now serves as the default fallback serialization framework, supporting a much broader range of types.
 Hadoop FileSystem support # This change permits users to use all file systems supported by Hadoop with Flink. In practice this means that users can use Flink with Tachyon, Google Cloud Storage (also out of the box Flink YARN support on Google Compute Cloud), FTP and all the other file system implementations for Hadoop.
 Heading to the 0.8.0 release # The community is working hard together with the Apache infra team to migrate the Flink infrastructure to a top-level project. At the same time, the Flink community is working on the Flink 0.8.0 release which should be out very soon.
-`}),e.add({id:286,href:"/2014/11/18/hadoop-compatibility-in-flink/",title:"Hadoop Compatibility in Flink",section:"Flink Blog",content:`Apache Hadoop is an industry standard for scalable analytical data processing. Many data analysis applications have been implemented as Hadoop MapReduce jobs and run in clusters around the world. Apache Flink can be an alternative to MapReduce and improves it in many dimensions. Among other features, Flink provides much better performance and offers APIs in Java and Scala, which are very easy to use. Similar to Hadoop, Flink’s APIs provide interfaces for Mapper and Reducer functions, as well as Input- and OutputFormats along with many more operators. While being conceptually equivalent, Hadoop’s MapReduce and Flink’s interfaces for these functions are unfortunately not source compatible.
+`}),e.add({id:287,href:"/2014/11/18/hadoop-compatibility-in-flink/",title:"Hadoop Compatibility in Flink",section:"Flink Blog",content:`Apache Hadoop is an industry standard for scalable analytical data processing. Many data analysis applications have been implemented as Hadoop MapReduce jobs and run in clusters around the world. Apache Flink can be an alternative to MapReduce and improves it in many dimensions. Among other features, Flink provides much better performance and offers APIs in Java and Scala, which are very easy to use. Similar to Hadoop, Flink’s APIs provide interfaces for Mapper and Reducer functions, as well as Input- and OutputFormats along with many more operators. While being conceptually equivalent, Hadoop’s MapReduce and Flink’s interfaces for these functions are unfortunately not source compatible.
 Flink’s Hadoop Compatibility Package # To close this gap, Flink provides a Hadoop Compatibility package to wrap functions implemented against Hadoop’s MapReduce interfaces and embed them in Flink programs. This package was developed as part of a Google Summer of Code 2014 project.
 With the Hadoop Compatibility package, you can reuse all your Hadoop
 InputFormats (mapred and mapreduce APIs) OutputFormats (mapred and mapreduce APIs) Mappers (mapred API) Reducers (mapred API) in Flink programs without changing a line of code. Moreover, Flink also natively supports all Hadoop data types (Writables and WritableComparable).
@@ -5594,7 +5604,7 @@
 What comes next? # While the Hadoop compatibility package is already very useful, we are currently working on a dedicated Hadoop Job operation to embed and execute Hadoop jobs as a whole in Flink programs, including their custom partitioning, sorting, and grouping code. With this feature, you will be able to chain multiple Hadoop jobs, mix them with Flink functions, and other operations such as Spargel operations (Pregel/Giraph-style jobs).
 Summary # Flink lets you reuse a lot of the code you wrote for Hadoop MapReduce, including all data types, all Input- and OutputFormats, and Mapper and Reducers of the mapred-API. Hadoop functions can be used within Flink programs and mixed with all other Flink functions. Due to Flink’s pipelined execution, Hadoop functions can arbitrarily be assembled without data exchange via HDFS. Moreover, the Flink community is currently working on a dedicated Hadoop Job operation to supporting the execution of Hadoop jobs as a whole.
 If you want to use Flink’s Hadoop compatibility package checkout our documentation.
-`}),e.add({id:287,href:"/2014/11/04/apache-flink-0.7.0-available/",title:"Apache Flink 0.7.0 available",section:"Flink Blog",content:`We are pleased to announce the availability of Flink 0.7.0. This release includes new user-facing features as well as performance and bug fixes, brings the Scala and Java APIs in sync, and introduces Flink Streaming. A total of 34 people have contributed to this release, a big thanks to all of them!
+`}),e.add({id:288,href:"/2014/11/04/apache-flink-0.7.0-available/",title:"Apache Flink 0.7.0 available",section:"Flink Blog",content:`We are pleased to announce the availability of Flink 0.7.0. This release includes new user-facing features as well as performance and bug fixes, brings the Scala and Java APIs in sync, and introduces Flink Streaming. A total of 34 people have contributed to this release, a big thanks to all of them!
 Download Flink 0.7.0 here
 See the release changelog here
 Overview of major new features # Flink Streaming: The gem of the 0.7.0 release is undoubtedly Flink Streaming. Available currently in alpha, Flink Streaming provides a Java API on top of Apache Flink that can consume streaming data sources (e.g., from Apache Kafka, Apache Flume, and others) and process them in real time. A dedicated blog post on Flink Streaming and its performance is coming up here soon. You can check out the Streaming programming guide here.
@@ -5606,7 +5616,7 @@
 BLOB service: This release contains a new service to distribute jar files and other binary data among the JobManager, TaskManagers and the client.
 Intermediate data sets: A major rewrite of the system internals introduces intermediate data sets as first class citizens. The internal state machine that tracks the distributed tasks has also been completely rewritten for scalability. While this is not visible as a user-facing feature yet, it is the foundation for several upcoming exciting features.
 Note: Currently, there is limited support for Java 8 lambdas when compiling and running from an IDE. The problem is due to type erasure and whether Java compilers retain type information. We are currently working with the Eclipse and OpenJDK communities to resolve this.
-Contributors # Tamas Ambrus Mariem Ayadi Marton Balassi Daniel Bali Ufuk Celebi Hung Chang David Eszes Stephan Ewen Judit Feher Gyula Fora Gabor Hermann Fabian Hueske Vasiliki Kalavri Kristof Kovacs Aljoscha Krettek Sebastian Kruse Sebastian Kunert Matyas Manninger Robert Metzger Mingliang Qi Till Rohrmann Henry Saputra Chesnay Schelper Moritz Schubotz Hung Sendoh Chang Peter Szabo Jonas Traub Fabian Tschirschnitz Artem Tsikiridis Kostas Tzoumas Timo Walther Daniel Warneke Tobias Wiens Yingjun Wu `}),e.add({id:288,href:"/2014/10/03/upcoming-events/",title:"Upcoming Events",section:"Flink Blog",content:`We are happy to announce several upcoming Flink events both in Europe and the US. Starting with a Flink hackathon in Stockholm (Oct 8-9) and a talk about Flink at the Stockholm Hadoop User Group (Oct 8). This is followed by the very first Flink Meetup in Berlin (Oct 15). In the US, there will be two Flink Meetup talks: the first one at the Pasadena Big Data User Group (Oct 29) and the second one at Silicon Valley Hands On Programming Events (Nov 4).
+Contributors # Tamas Ambrus Mariem Ayadi Marton Balassi Daniel Bali Ufuk Celebi Hung Chang David Eszes Stephan Ewen Judit Feher Gyula Fora Gabor Hermann Fabian Hueske Vasiliki Kalavri Kristof Kovacs Aljoscha Krettek Sebastian Kruse Sebastian Kunert Matyas Manninger Robert Metzger Mingliang Qi Till Rohrmann Henry Saputra Chesnay Schelper Moritz Schubotz Hung Sendoh Chang Peter Szabo Jonas Traub Fabian Tschirschnitz Artem Tsikiridis Kostas Tzoumas Timo Walther Daniel Warneke Tobias Wiens Yingjun Wu `}),e.add({id:289,href:"/2014/10/03/upcoming-events/",title:"Upcoming Events",section:"Flink Blog",content:`We are happy to announce several upcoming Flink events both in Europe and the US. Starting with a Flink hackathon in Stockholm (Oct 8-9) and a talk about Flink at the Stockholm Hadoop User Group (Oct 8). This is followed by the very first Flink Meetup in Berlin (Oct 15). In the US, there will be two Flink Meetup talks: the first one at the Pasadena Big Data User Group (Oct 29) and the second one at Silicon Valley Hands On Programming Events (Nov 4).
 We are looking forward to seeing you at any of these events. The following is an overview of each event and links to the respective Meetup pages.
 Flink Hackathon, Stockholm (Oct 8-9) # The hackathon will take place at KTH/SICS from Oct 8th-9th. You can sign up here: https://docs.google.com/spreadsheet/viewform?formkey=dDZnMlRtZHJ3Z0hVTlFZVjU2MWtoX0E6MA.
 Here is a rough agenda and a list of topics to work upon or look into. Suggestions and more topics are welcome.
@@ -5630,10 +5640,10 @@
 http://www.meetup.com/Apache-Flink-Meetup/events/208227422/
 Meetup: Pasadena Big Data User Group (Oct 29) # http://www.meetup.com/Pasadena-Big-Data-Users-Group/
 Meetup: Silicon Valley Hands On Programming Events (Nov 4) # http://www.meetup.com/HandsOnProgrammingEvents/events/210504392/
-`}),e.add({id:289,href:"/2014/09/26/apache-flink-0.6.1-available/",title:"Apache Flink 0.6.1 available",section:"Flink Blog",content:`We are happy to announce the availability of Flink 0.6.1.
+`}),e.add({id:290,href:"/2014/09/26/apache-flink-0.6.1-available/",title:"Apache Flink 0.6.1 available",section:"Flink Blog",content:`We are happy to announce the availability of Flink 0.6.1.
 0.6.1 is a maintenance release, which includes minor fixes across several parts of the system. We suggest all users of Flink to work with this newest version.
 Download the release today.
-`}),e.add({id:290,href:"/2014/08/26/apache-flink-0.6-available/",title:"Apache Flink 0.6 available",section:"Flink Blog",content:`We are happy to announce the availability of Flink 0.6. This is the first release of the system inside the Apache Incubator and under the name Flink. Releases up to 0.5 were under the name Stratosphere, the academic and open source project that Flink originates from.
+`}),e.add({id:291,href:"/2014/08/26/apache-flink-0.6-available/",title:"Apache Flink 0.6 available",section:"Flink Blog",content:`We are happy to announce the availability of Flink 0.6. This is the first release of the system inside the Apache Incubator and under the name Flink. Releases up to 0.5 were under the name Stratosphere, the academic and open source project that Flink originates from.
 What is Flink? # Apache Flink is a general-purpose data processing engine for clusters. It runs on YARN clusters on top of data stored in Hadoop, as well as stand-alone. Flink currently has programming APIs in Java and Scala. Jobs are executed via Flink&rsquo;s own runtime engine. Flink features:
 Robust in-memory and out-of-core processing: once read, data stays in memory as much as possible, and is gracefully de-staged to disk in the presence of memory pressure from limited memory or other applications. The runtime is designed to perform very well both in setups with abundant memory and in setups where memory is scarce.
 POJO-based APIs: when programming, you do not have to pack your data into key-value pairs or some other framework-specific data model. Rather, you can use arbitrary Java and Scala types to model your data.
@@ -5643,7 +5653,7 @@
 Release 0.6 # Flink 0.6 builds on the latest Stratosphere 0.5 release. It includes many bug fixes and improvements that make the system more stable and robust, as well as breaking API changes.
 The full release notes are available here.
 Download the release here.
-Contributors # Wilson Cao Ufuk Celebi Stephan Ewen Jonathan Hasenburg Markus Holzemer Fabian Hueske Sebastian Kunert Vikhyat Korrapati Aljoscha Krettek Sebastian Kruse Raymond Liu Robert Metzger Mingliang Qi Till Rohrmann Henry Saputra Chesnay Schepler Kostas Tzoumas Robert Waury Timo Walther Daniel Warneke Tobias Wiens `}),e.add({id:291,href:"/how-to-contribute/code-style-and-quality-common/",title:"Code Style and Quality Guide — Common Rules",section:"How to Contribute",content:` Code Style and Quality Guide — Common Rules # Preamble # Pull Requests &amp; Changes # Common Coding Guide # Java Language Guide # Scala Language Guide # Components Guide # Formatting Guide # 1. Copyright # Each file must include the Apache license information as a header.
+Contributors # Wilson Cao Ufuk Celebi Stephan Ewen Jonathan Hasenburg Markus Holzemer Fabian Hueske Sebastian Kunert Vikhyat Korrapati Aljoscha Krettek Sebastian Kruse Raymond Liu Robert Metzger Mingliang Qi Till Rohrmann Henry Saputra Chesnay Schepler Kostas Tzoumas Robert Waury Timo Walther Daniel Warneke Tobias Wiens `}),e.add({id:292,href:"/how-to-contribute/code-style-and-quality-common/",title:"Code Style and Quality Guide — Common Rules",section:"How to Contribute",content:` Code Style and Quality Guide — Common Rules # Preamble # Pull Requests &amp; Changes # Common Coding Guide # Java Language Guide # Scala Language Guide # Components Guide # Formatting Guide # 1. Copyright # Each file must include the Apache license information as a header.
 /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * &#34;License&#34;); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an &#34;AS IS&#34; BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ 2. Tools # We recommend to follow the IDE Setup Guide to get IDE tooling configured.
 Warnings # We strive for zero warnings Even though there are many warnings in existing code, new changes should not add any additional compiler warnings If it is not possible to address the warning in a sane way (in some cases when working with generics) add an annotation to suppress the warning When deprecating methods, check that this does not introduce additional warnings 3. Comments And Code Readability # Comments # Golden rule: Comment as much as necessary to support code understanding, but don’t add redundant information.
 Think about
@@ -5692,7 +5702,7 @@
 https://github.com/apache/flink/blob/master/flink-core/src/test/java/org/apache/flink/util/LinkedOptionalMapTest.java https://github.com/apache/flink/blob/master/flink-filesystems/flink-s3-fs-base/src/test/java/org/apache/flink/fs/s3/common/writer/RecoverableMultiPartUploadImplTest.java Avoid Mockito - Use reusable test implementations # Mockito-based tests tend to be costly to maintain in the long run by encouraging duplication of functionality and testing for implementation rather than effect More details: https://docs.google.com/presentation/d/1fZlTjOJscwmzYadPGl23aui6zopl94Mn5smG-rB0qT8 Instead, create reusable test implementations and utilities That way, when some class changes, we only have to update a few test utils or mocks Avoid timeouts in JUnit tests # Generally speaking, we should avoid setting local timeouts in JUnit tests but rather depend on the global timeout in Azure. The global timeout benefits from taking thread dumps just before timing out the build, easing debugging.
 At the same time, any timeout value that you manually set is arbitrary. If it&rsquo;s set too low, you get test instabilities. What too low means depends on numerous factors, such as hardware and current utilization (especially I/O). Moreover, a local timeout is more maintenance-intensive. It&rsquo;s one more knob where you can tweak a build. If you change the test a bit, you also need to double-check the timeout. Hence, there have been quite a few commits that just increase timeouts.
 We are keeping such frameworks out of Flink, to make debugging easier and avoid dependency clashes.&#160;&#x21a9;&#xfe0e;
-`}),e.add({id:292,href:"/how-to-contribute/code-style-and-quality-components/",title:"Code Style and Quality Guide — Components Guide",section:"How to Contribute",content:` Code Style and Quality Guide — Components Guide # Preamble # Pull Requests &amp; Changes # Common Coding Guide # Java Language Guide # Scala Language Guide # Components Guide # Formatting Guide # Component Specific Guidelines # Additional guidelines about changes in specific components.
+`}),e.add({id:293,href:"/how-to-contribute/code-style-and-quality-components/",title:"Code Style and Quality Guide — Components Guide",section:"How to Contribute",content:` Code Style and Quality Guide — Components Guide # Preamble # Pull Requests &amp; Changes # Common Coding Guide # Java Language Guide # Scala Language Guide # Components Guide # Formatting Guide # Component Specific Guidelines # Additional guidelines about changes in specific components.
 Configuration Changes # Where should the config option go?
 ‘flink-conf.yaml’: All configuration that pertains to execution behavior that one may want to standardize across jobs. Think of it as parameters someone would set wearing an “ops” hat, or someone that provides a stream processing platform to other teams.
 ‘ExecutionConfig’: Parameters specific to an individual Flink application, needed by the operators during execution. Typical examples are watermark interval, serializer parameters, object reuse.
@@ -5716,7 +5726,7 @@
 SQL natively supports NULL for almost every operation and has a 3-valued boolean logic. Make sure to test every feature for nullability as well. Avoid full integration tests
 Spawning a Flink mini-cluster and performing compilation of generated code for a SQL query is expensive. Avoid integration tests for planner tests or variations of API calls. Instead, use unit tests that validate the optimized plan which comes out of a planner. Or test the behavior of a runtime operator directly. Compatibility # Don’t introduce physical plan changes in patch releases!
 Backwards compatibility for state in streaming SQL relies on the fact that the physical execution plan remains stable. Otherwise the generated Operator Names/IDs change and state cannot be matched and restored. Every bug fix that leads to changes in the optimized physical plan of a streaming pipeline hences breaks compatibility. As a consequence, changes of the kind that lead to different optimizer plans can only be merged in major releases for now. Scala / Java interoperability (legacy code parts) # Keep Java in mind when designing interfaces.
-Consider whether a class will need to interact with a Java class in the future. Use Java collections and Java Optional in interfaces for a smooth integration with Java code. Don’t use features of case classes such as .copy() or apply() for construction if a class is subjected to be converted to Java. Pure Scala user-facing APIs should use pure Scala collections/iterables/etc. for natural and idiomatic (“scalaesk”) integration with Scala. `}),e.add({id:293,href:"/how-to-contribute/code-style-and-quality-formatting/",title:"Code Style and Quality Guide — Formatting Guide",section:"How to Contribute",content:` Code Style and Quality Guide — Formatting Guide # Preamble # Pull Requests &amp; Changes # Common Coding Guide # Java Language Guide # Scala Language Guide # Components Guide # Formatting Guide # Java Code Formatting Style # We recommend to set up the IDE to automatically check the code style. Please follow the IDE Setup Guide to set up spotless and checkstyle .
+Consider whether a class will need to interact with a Java class in the future. Use Java collections and Java Optional in interfaces for a smooth integration with Java code. Don’t use features of case classes such as .copy() or apply() for construction if a class is subjected to be converted to Java. Pure Scala user-facing APIs should use pure Scala collections/iterables/etc. for natural and idiomatic (“scalaesk”) integration with Scala. `}),e.add({id:294,href:"/how-to-contribute/code-style-and-quality-formatting/",title:"Code Style and Quality Guide — Formatting Guide",section:"How to Contribute",content:` Code Style and Quality Guide — Formatting Guide # Preamble # Pull Requests &amp; Changes # Common Coding Guide # Java Language Guide # Scala Language Guide # Components Guide # Formatting Guide # Java Code Formatting Style # We recommend to set up the IDE to automatically check the code style. Please follow the IDE Setup Guide to set up spotless and checkstyle .
 License # Apache license headers. Make sure you have Apache License headers in your files. The RAT plugin is checking for that when you build the code. Imports # Empty line before and after package declaration. No unused imports. No redundant imports. No wildcard imports. They can cause problems when adding to the code and in some cases even during refactoring. Import order. Imports must be ordered alphabetically, grouped into the following blocks, with each block separated by an empty line: &lt;imports from org.apache.flink.*&gt; &lt;imports from org.apache.flink.shaded.*&gt; &lt;imports from other libraries&gt; &lt;imports from javax.*&gt; &lt;imports from java.*&gt; &lt;imports from scala.*&gt; &lt;static imports&gt; Naming # Package names must start with a letter, and must not contain upper-case letters or special characters. Non-private static final fields must be upper-case, with words being separated by underscores.(MY_STATIC_VARIABLE) Non-static fields/methods must be in lower camel case. (myNonStaticField) Whitespaces # Tabs vs. spaces. We are using spaces for indentation, not tabs. No trailing whitespace. Spaces around operators/keywords. Operators (+, =, &gt;, …) and keywords (if, for, catch, …) must have a space before and after them, provided they are not at the start or end of the line. Breaking the lines of too long statements # In general long lines should be avoided for the better readability. Try to use short statements which operate on the same level of abstraction. Break the long statements by creating more local variables, defining helper functions etc.
 Two major sources of long lines are:
 Long list of arguments in function declaration or call: void func(type1 arg1, type2 arg2, ...) Long sequence of chained calls: list.stream().map(...).reduce(...).collect(...)... Rules about breaking the long lines:
@@ -5724,7 +5734,7 @@
 The opening parenthesis always stays on the line of the parent function name The possible thrown exception list is never broken and stays on the same last line, even if the line length exceeds its limit The line of the function argument should end with a comma staying on the same line except the last argument Example of breaking the list of function arguments:
 public void func( int arg1, int arg2, ...) throws E1, E2, E3 { } The dot of a chained call is always on the line of that chained call proceeding the call at the beginning.
 Example of breaking the list of chained calls:
-values .stream() .map(...) .collect(...); Braces # Left curly braces ({) must not be placed on a new line. Right curly braces (}) must always be placed at the beginning of the line. Blocks. All statements after if, for, while, do, … must always be encapsulated in a block with curly braces (even if the block contains one statement). Javadocs # All public/protected methods and classes must have a Javadoc. The first sentence of the Javadoc must end with a period. Paragraphs must be separated with a new line, and started with . Modifiers # No redundant modifiers. For example, public modifiers in interface methods. Follow JLS3 modifier order. Modifiers must be ordered in the following order: public, protected, private, abstract, static, final, transient, volatile, synchronized, native, strictfp. Files # All files must end with \\n. File length must not exceed 3000 lines. Misc # Arrays must be defined Java-style. For example, public String[] array. Use Flink Preconditions. To increase homogeneity, consistently use the org.apache.flink.Preconditions methods checkNotNull and checkArgument rather than Apache Commons Validate or Google Guava. 
`}),e.add({id:294,href:"/how-to-contribute/code-style-and-quality-java/",title:"Code Style and Quality Guide — Java",section:"How to Contribute",content:` Code Style and Quality Guide — Java # Preamble # Pull Requests &amp; Changes # Common Coding Guide # Java Language Guide # Scala Language Guide # Components Guide # Formatting Guide # Java Language Features and Libraries # Preconditions and Log Statements # Never concatenate strings in the parameters Don’t: Preconditions.checkState(value &lt;= threshold, &quot;value must be below &quot; + threshold) Don’t: LOG.debug(&quot;value is &quot; + value) Do: Preconditions.checkState(value &lt;= threshold, &quot;value must be below %s&quot;, threshold) Do: LOG.debug(&quot;value is {}&quot;, value) Generics # No raw types: Do not use raw types, unless strictly necessary (sometimes necessary for signature matches, arrays). Suppress warnings for unchecked conversions: Add annotations to suppress warnings, if they cannot be avoided (such as “unchecked”, or “serial”). Otherwise warnings about generics flood the build and drown relevant warnings. equals() / hashCode() # equals() / hashCode() should be added when they are well defined only. They should not be added to enable a simpler assertion in tests when they are not well defined. Use hamcrest matchers in that case: https://github.com/junit-team/junit4/wiki/matchers-and-assertthat A common indicator that the methods are not well defined is when they take a subset of the fields into account (other than fields that are purely auxiliary). When the methods take mutable fields into account, you often have a design issue. The equals()/hashCode() methods suggest to use the type as a key, but the signatures suggest it is safe to keep mutating the type. Java Serialization # Do not use Java Serialization for anything !!!
+values .stream() .map(...) .collect(...); Braces # Left curly braces ({) must not be placed on a new line. Right curly braces (}) must always be placed at the beginning of the line. Blocks. All statements after if, for, while, do, … must always be encapsulated in a block with curly braces (even if the block contains one statement). Javadocs # All public/protected methods and classes must have a Javadoc. The first sentence of the Javadoc must end with a period. Paragraphs must be separated with a new line, and started with . Modifiers # No redundant modifiers. For example, public modifiers in interface methods. Follow JLS3 modifier order. Modifiers must be ordered in the following order: public, protected, private, abstract, static, final, transient, volatile, synchronized, native, strictfp. Files # All files must end with \\n. File length must not exceed 3000 lines. Misc # Arrays must be defined Java-style. For example, public String[] array. Use Flink Preconditions. To increase homogeneity, consistently use the org.apache.flink.Preconditions methods checkNotNull and checkArgument rather than Apache Commons Validate or Google Guava. 
`}),e.add({id:295,href:"/how-to-contribute/code-style-and-quality-java/",title:"Code Style and Quality Guide — Java",section:"How to Contribute",content:` Code Style and Quality Guide — Java # Preamble # Pull Requests &amp; Changes # Common Coding Guide # Java Language Guide # Scala Language Guide # Components Guide # Formatting Guide # Java Language Features and Libraries # Preconditions and Log Statements # Never concatenate strings in the parameters Don’t: Preconditions.checkState(value &lt;= threshold, &quot;value must be below &quot; + threshold) Don’t: LOG.debug(&quot;value is &quot; + value) Do: Preconditions.checkState(value &lt;= threshold, &quot;value must be below %s&quot;, threshold) Do: LOG.debug(&quot;value is {}&quot;, value) Generics # No raw types: Do not use raw types, unless strictly necessary (sometimes necessary for signature matches, arrays). Suppress warnings for unchecked conversions: Add annotations to suppress warnings, if they cannot be avoided (such as “unchecked”, or “serial”). Otherwise warnings about generics flood the build and drown relevant warnings. equals() / hashCode() # equals() / hashCode() should be added when they are well defined only. They should not be added to enable a simpler assertion in tests when they are not well defined. Use hamcrest matchers in that case: https://github.com/junit-team/junit4/wiki/matchers-and-assertthat A common indicator that the methods are not well defined is when they take a subset of the fields into account (other than fields that are purely auxiliary). When the methods take mutable fields into account, you often have a design issue. The equals()/hashCode() methods suggest to use the type as a key, but the signatures suggest it is safe to keep mutating the type. Java Serialization # Do not use Java Serialization for anything !!!
 Do not use Java Serialization for anything !!! !!!
 Do not use Java Serialization for anything !!! !!! !!!
 Internal to Flink, Java serialization is used to transport messages and programs through RPC. This is the only case where we use Java serialization. Because of that, some classes need to be serializable (if they are transported via RPC).
@@ -5738,7 +5748,7 @@
 map.computeIfAbsent(key, k -&gt; k.toLowerCase()); Consider method references instead of inline lambdas
 don’t:
 map.computeIfAbsent(key, k-&gt; Loader.load(k)); do:
-map.computeIfAbsent(key, Loader::load); Java Streams # Avoid Java Streams in any performance critical code. The main motivation to use Java Streams would be to improve code readability. As such, they can be a good match in parts of the code that are not data-intensive, but deal with coordination.. Even in the latter case, try to limit the scope to a method, or a few private methods within an internal class. `}),e.add({id:295,href:"/how-to-contribute/code-style-and-quality-pull-requests/",title:"Code Style and Quality Guide — Pull Requests & Changes",section:"How to Contribute",content:` Code Style and Quality Guide — Pull Requests &amp; Changes # Preamble # Pull Requests &amp; Changes # Common Coding Guide # Java Language Guide # Scala Language Guide # Components Guide # Formatting Guide # Rationale: We ask contributors to put in a little bit of extra effort to bring pull requests into a state that they can be more easily and more thoroughly reviewed. This helps the community in many ways:
+map.computeIfAbsent(key, Loader::load); Java Streams # Avoid Java Streams in any performance critical code. The main motivation to use Java Streams would be to improve code readability. As such, they can be a good match in parts of the code that are not data-intensive, but deal with coordination.. Even in the latter case, try to limit the scope to a method, or a few private methods within an internal class. `}),e.add({id:296,href:"/how-to-contribute/code-style-and-quality-pull-requests/",title:"Code Style and Quality Guide — Pull Requests & Changes",section:"How to Contribute",content:` Code Style and Quality Guide — Pull Requests &amp; Changes # Preamble # Pull Requests &amp; Changes # Common Coding Guide # Java Language Guide # Scala Language Guide # Components Guide # Formatting Guide # Rationale: We ask contributors to put in a little bit of extra effort to bring pull requests into a state that they can be more easily and more thoroughly reviewed. This helps the community in many ways:
 Reviews are much faster and thus contributions get merged sooner. We can ensure higher code quality by overlooking fewer issues in the contributions. Committers can review more contributions in the same time, which helps to keep up with the high rate of contributions that Flink is experiencing Please understand that contributions that do not follow this guide will take longer to review and thus will typically be picked up with lower priority by the community. That is not ill intend, it is due to the added complexity of reviewing unstructured Pull Requests.
 1. JIRA issue and Naming # Make sure that the pull request corresponds to a JIRA issue.
 Exceptions are hotfixes, like fixing typos in JavaDocs or documentation files.
@@ -5759,7 +5769,7 @@
 In some cases, the issue might be a subtask here, and the component may be different from the Pull Request’s main component. For example, when the commit introduces an end-to-end test for a runtime change, the PR would be tagged as [runtime], but the individual commit would be tagged as [e2e].
 Examples for commit messages:
 [hotfix] Fix update_branch_version.sh to allow version suffixes [hotfix] [table] Remove unused geometry dependency [FLINK-11704][tests] Improve AbstractCheckpointStateOutputStreamTestBase [FLINK-10569][runtime] Remove Instance usage in ExecutionVertexCancelTest [FLINK-11702][table-planner-blink] Introduce a new table type system 5. Changes to the observable behavior of the system # Contributors should be aware of changes in their PRs that break the observable behavior of Flink in any way because in many cases such changes can break existing setups. Red flags that should raise questions while coding or in reviews with respect to this problem are for example:
-Assertions have been changed to make tests pass again with the breaking change. Configuration setting that must suddenly be set to (non-default) values to keep existing tests passing. This can happen in particular for new settings with a breaking default. Existing scripts or configurations have to be adjusted. `}),e.add({id:296,href:"/how-to-contribute/code-style-and-quality-scala/",title:"Code Style and Quality Guide — Scala",section:"How to Contribute",content:` Code Style and Quality Guide — Scala # Preamble # Pull Requests &amp; Changes # Common Coding Guide # Java Language Guide # Scala Language Guide # Components Guide # Formatting Guide # Scala Language Features # Where to use (and not use) Scala # We use Scala for Scala APIs or pure Scala Libraries.
+Assertions have been changed to make tests pass again with the breaking change. Configuration setting that must suddenly be set to (non-default) values to keep existing tests passing. This can happen in particular for new settings with a breaking default. Existing scripts or configurations have to be adjusted. `}),e.add({id:297,href:"/how-to-contribute/code-style-and-quality-scala/",title:"Code Style and Quality Guide — Scala",section:"How to Contribute",content:` Code Style and Quality Guide — Scala # Preamble # Pull Requests &amp; Changes # Common Coding Guide # Java Language Guide # Scala Language Guide # Components Guide # Formatting Guide # Scala Language Features # Where to use (and not use) Scala # We use Scala for Scala APIs or pure Scala Libraries.
 We do not use Scala in the core APIs and runtime components. We aim to remove existing Scala use (code and dependencies) from those components.
 ⇒ This is not because we do not like Scala, it is a consequence of “the right tool for the right job” approach (see below).
 For APIs, we develop the foundation in Java, and layer Scala on top.
@@ -5772,16 +5782,16 @@
 var expressions = new java.util.ArrayList[String]() Do:
 var expressions: java.util.List[String] = new java.util.ArrayList[]() Type inference for local variables on the stack is fine.
 Use strict visibility. Avoid Scala’s package private features (such as private[flink]) and use regular private/protected instead. Keep in mind that private[flink] and protected members are public in Java. Keep in mind that private[flink] still exposes all members in Flink provided examples. Coding Formatting # Use line wrapping to structure your code.
-Scala’s functional nature allows for long transformation chains (x.map().map().foreach()). In order to force implementers to structure their code, the line length is therefore limited to 100 characters. Use one line per transformation for better maintainability. `}),e.add({id:297,href:"/flink-packages/",title:"flink-packages.org",section:"Apache Flink® — Stateful Computations over Data Streams",content:` What is the Flink Kubernetes Operator? # All information on the flink-packages can be found on the flink-packages website.
-`}),e.add({id:298,href:"/material/",title:"Material",section:"Apache Flink® — Stateful Computations over Data Streams",content:` Material # Apache Flink Logos # We provide the Apache Flink logo in different sizes and formats. You can download all variants (7.4 MB) or just pick the one you need from this page.
+Scala’s functional nature allows for long transformation chains (x.map().map().foreach()). In order to force implementers to structure their code, the line length is therefore limited to 100 characters. Use one line per transformation for better maintainability. `}),e.add({id:298,href:"/flink-packages/",title:"flink-packages.org",section:"Apache Flink® — Stateful Computations over Data Streams",content:` What is the Flink Kubernetes Operator? # All information on the flink-packages can be found on the flink-packages website.
+`}),e.add({id:299,href:"/material/",title:"Material",section:"Apache Flink® — Stateful Computations over Data Streams",content:` Material # Apache Flink Logos # We provide the Apache Flink logo in different sizes and formats. You can download all variants (7.4 MB) or just pick the one you need from this page.
 Portable Network Graphics (PNG) # Colored logo White filled logo Black outline logo Sizes (px) 50x50, 100x100, 200x200, 500x500, 1000x1000 Sizes (px): 50x50, 100x100, 200x200, 500x500, 1000x1000
 Sizes (px): 50x50, 100x100, 200x200, 500x500, 1000x1000 You can find more variants of the logo in this directory or download all variants (7.4 MB).
 Scalable Vector Graphics (SVG) # Colored logo White filled logo Black outline logo Colored logo with black text (color_black.svg) White filled logo (white_filled.svg) Black outline logo (black_outline.svg) You can find more variants of the logo in this directory or download all variants (7.4 MB).
 Photoshop (PSD) # You can download the logo in PSD format as well:
 Colored logo: 1000x1000. Black outline logo with text: 1000x1000, 5000x5000. You can find more variants of the logo in this directory or download all variants (7.4 MB).
 Color Scheme # You can use the provided color scheme which incorporates some colors of the Flink logo:
-PDF color scheme Powerpoint color scheme `}),e.add({id:299,href:"/what-is-flink-ml/",title:"What is Flink ML?",section:"Apache Flink® — Stateful Computations over Data Streams",content:` What is Stateful Functions? # All information on Flink ML can be found on the Flink ML website.
-`}),e.add({id:300,href:"/what-is-flink-table-store/",title:"What is Paimon(incubating) (formerly Flink Table Store)?",section:"Apache Flink® — Stateful Computations over Data Streams",content:` What is Apache Paimon (formerly Flink Table Store)? # The Flink Table Store had joined Apache Incubator as Apache Paimon(incubating). All information on the Apache Paimon(incubating) can be found on the Paimon website.
-`}),e.add({id:301,href:"/what-is-stateful-functions/",title:"What is Stateful Functions?",section:"Apache Flink® — Stateful Computations over Data Streams",content:` What is Stateful Functions? # All information on Stateful Functions can be found on the Stateful Functions project website.
-`}),e.add({id:302,href:"/what-is-the-flink-kubernetes-operator/",title:"What is the Flink Kubernetes Operator?",section:"Apache Flink® — Stateful Computations over Data Streams",content:` What is the Flink Kubernetes Operator? # All information on the Flink Kubernetes Operator can be found on the Flink Kubernetes Operator website.
+PDF color scheme Powerpoint color scheme `}),e.add({id:300,href:"/what-is-flink-ml/",title:"What is Flink ML?",section:"Apache Flink® — Stateful Computations over Data Streams",content:` What is Stateful Functions? # All information on Flink ML can be found on the Flink ML website.
+`}),e.add({id:301,href:"/what-is-flink-table-store/",title:"What is Paimon(incubating) (formerly Flink Table Store)?",section:"Apache Flink® — Stateful Computations over Data Streams",content:` What is Apache Paimon (formerly Flink Table Store)? # The Flink Table Store had joined Apache Incubator as Apache Paimon(incubating). All information on the Apache Paimon(incubating) can be found on the Paimon website.
+`}),e.add({id:302,href:"/what-is-stateful-functions/",title:"What is Stateful Functions?",section:"Apache Flink® — Stateful Computations over Data Streams",content:` What is Stateful Functions? # All information on Stateful Functions can be found on the Stateful Functions project website.
+`}),e.add({id:303,href:"/what-is-the-flink-kubernetes-operator/",title:"What is the Flink Kubernetes Operator?",section:"Apache Flink® — Stateful Computations over Data Streams",content:` What is the Flink Kubernetes Operator? # All information on the Flink Kubernetes Operator can be found on the Flink Kubernetes Operator website.
 `})})()
\ No newline at end of file
diff --git a/content/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js b/content/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js
deleted file mode 100644
index 1377b37..0000000
--- a/content/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js
+++ /dev/null
@@ -1 +0,0 @@
-"use strict";(function(){const e=document.querySelector("#book-search-input"),t=document.querySelector("#book-search-results");if(!e)return;e.addEventListener("focus",n),e.addEventListener("keyup",s),document.addEventListener("keypress",i);function i(t){if(e===document.activeElement)return;const n=String.fromCharCode(t.charCode);if(!a(n))return;e.focus(),t.preventDefault()}function a(t){const n=e.getAttribute("data-hotkeys")||"";return n.indexOf(t)>=0}function n(){e.removeEventListener("focus",n),e.required=!0,o("/flexsearch.min.js"),o("/en.search-data.min.99ba92d527f4c3d92b29f41d78147697fa1d346b3432b5c524cae5356f163ffc.js",function(){e.required=!1,s()})}function s(){for(;t.firstChild;)t.removeChild(t.firstChild);if(!e.value)return;const n=window.bookSearchIndex.search(e.value,10);n.forEach(function(e){const n=r("<li><a href></a><small></small></li>"),s=n.querySelector("a"),o=n.querySelector("small");s.href=e.href,s.textContent=e.title,o.textContent=e.section,t.appendChild(n)})}function o(e,t){const n=document.createElement("script");n.defer=!0,n.async=!1,n.src=e,n.onload=t,document.head.appendChild(n)}function r(e){const t=document.createElement("div");return t.innerHTML=e,t.firstChild}})()
\ No newline at end of file
diff --git a/content/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js b/content/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js
similarity index 89%
rename from content/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js
rename to content/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js
index 8190c91..4ce2aca 100644
--- a/content/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js
+++ b/content/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js
@@ -1 +1 @@
-"use strict";(function(){const e=document.querySelector("#book-search-input"),t=document.querySelector("#book-search-results");if(!e)return;e.addEventListener("focus",n),e.addEventListener("keyup",s),document.addEventListener("keypress",i);function i(t){if(e===document.activeElement)return;const n=String.fromCharCode(t.charCode);if(!a(n))return;e.focus(),t.preventDefault()}function a(t){const n=e.getAttribute("data-hotkeys")||"";return n.indexOf(t)>=0}function n(){e.removeEventListener("focus",n),e.required=!0,o("/flexsearch.min.js"),o("/zh.search-data.min.a53a1d5f39df3a7b6193a82c102bf76ca2ef8af7a10e97686f0ab442b4235f38.js",function(){e.required=!1,s()})}function s(){for(;t.firstChild;)t.removeChild(t.firstChild);if(!e.value)return;const n=window.bookSearchIndex.search(e.value,10);n.forEach(function(e){const n=r("<li><a href></a><small></small></li>"),s=n.querySelector("a"),o=n.querySelector("small");s.href=e.href,s.textContent=e.title,o.textContent=e.section,t.appendChild(n)})}function o(e,t){const n=document.createElement("script");n.defer=!0,n.async=!1,n.src=e,n.onload=t,document.head.appendChild(n)}function r(e){const t=document.createElement("div");return t.innerHTML=e,t.firstChild}})()
\ No newline at end of file
+"use strict";(function(){const e=document.querySelector("#book-search-input"),t=document.querySelector("#book-search-results");if(!e)return;e.addEventListener("focus",n),e.addEventListener("keyup",s),document.addEventListener("keypress",i);function i(t){if(e===document.activeElement)return;const n=String.fromCharCode(t.charCode);if(!a(n))return;e.focus(),t.preventDefault()}function a(t){const n=e.getAttribute("data-hotkeys")||"";return n.indexOf(t)>=0}function n(){e.removeEventListener("focus",n),e.required=!0,o("/flexsearch.min.js"),o("/en.search-data.min.72f68b6b627b36c5e28a8b961a951bedb080cdb7b03ad224b68e89aa6fa8cb88.js",function(){e.required=!1,s()})}function s(){for(;t.firstChild;)t.removeChild(t.firstChild);if(!e.value)return;const n=window.bookSearchIndex.search(e.value,10);n.forEach(function(e){const n=r("<li><a href></a><small></small></li>"),s=n.querySelector("a"),o=n.querySelector("small");s.href=e.href,s.textContent=e.title,o.textContent=e.section,t.appendChild(n)})}function o(e,t){const n=document.createElement("script");n.defer=!0,n.async=!1,n.src=e,n.onload=t,document.head.appendChild(n)}function r(e){const t=document.createElement("div");return t.innerHTML=e,t.firstChild}})()
\ No newline at end of file
diff --git a/content/en/sitemap.xml b/content/en/sitemap.xml
index 089f887..784e69f 100644
--- a/content/en/sitemap.xml
+++ b/content/en/sitemap.xml
@@ -3,7 +3,7 @@
   xmlns:xhtml="http://www.w3.org/1999/xhtml">
   <url>
     <loc>https://flink.apache.org/posts/</loc>
-    <lastmod>2025-07-31T00:00:00+00:00</lastmod>
+    <lastmod>2025-09-26T08:00:00+00:00</lastmod>
   </url><url>
     <loc>https://flink.apache.org/what-is-flink/flink-architecture/</loc>
     <xhtml:link
@@ -473,11 +473,11 @@
                 href="https://flink.apache.org/how-to-contribute/getting-help/"
                 />
   </url><url>
-    <loc>https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/</loc>
-    <lastmod>2025-07-31T00:00:00+00:00</lastmod>
+    <loc>https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/</loc>
+    <lastmod>2025-09-26T08:00:00+00:00</lastmod>
   </url><url>
     <loc>https://flink.apache.org/</loc>
-    <lastmod>2025-07-31T00:00:00+00:00</lastmod>
+    <lastmod>2025-09-26T08:00:00+00:00</lastmod>
     <xhtml:link
                 rel="alternate"
                 hreflang="zh"
@@ -489,6 +489,9 @@
                 href="https://flink.apache.org/"
                 />
   </url><url>
+    <loc>https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/</loc>
+    <lastmod>2025-07-31T00:00:00+00:00</lastmod>
+  </url><url>
     <loc>https://flink.apache.org/2025/07/10/apache-flink-1.19.3-release-announcement/</loc>
     <lastmod>2025-07-10T00:00:00+00:00</lastmod>
   </url><url>
diff --git a/content/flink-packages/index.html b/content/flink-packages/index.html
index 4b2902b..2dfd45f 100644
--- a/content/flink-packages/index.html
+++ b/content/flink-packages/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/flink-packages/" title="flink-packages.org">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/getting-started/index.html b/content/getting-started/index.html
index 0039e06..12d6be4 100644
--- a/content/getting-started/index.html
+++ b/content/getting-started/index.html
@@ -26,7 +26,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/getting-started/" title="教程">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/getting-started/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -281,7 +281,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/getting-started/training-course/index.html b/content/getting-started/training-course/index.html
index 78e36ae..c60d8ee 100644
--- a/content/getting-started/training-course/index.html
+++ b/content/getting-started/training-course/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/getting-started/training-course/" title="Training Course">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/getting-started/with-flink-cdc/index.html b/content/getting-started/with-flink-cdc/index.html
index d36a40a..6f37398 100644
--- a/content/getting-started/with-flink-cdc/index.html
+++ b/content/getting-started/with-flink-cdc/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/getting-started/with-flink-cdc/" title="With Flink CDC">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/getting-started/with-flink-kubernetes-operator/index.html b/content/getting-started/with-flink-kubernetes-operator/index.html
index 4612614..ac7a776 100644
--- a/content/getting-started/with-flink-kubernetes-operator/index.html
+++ b/content/getting-started/with-flink-kubernetes-operator/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/getting-started/with-flink-kubernetes-operator/" title="With Flink Kubernetes Operator">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/getting-started/with-flink-ml/index.html b/content/getting-started/with-flink-ml/index.html
index ac339eb..6074410 100644
--- a/content/getting-started/with-flink-ml/index.html
+++ b/content/getting-started/with-flink-ml/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/getting-started/with-flink-ml/" title="With Flink ML">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/getting-started/with-flink-stateful-functions/index.html b/content/getting-started/with-flink-stateful-functions/index.html
index 6a6e339..6dc619f 100644
--- a/content/getting-started/with-flink-stateful-functions/index.html
+++ b/content/getting-started/with-flink-stateful-functions/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/getting-started/with-flink-stateful-functions/" title="With Flink Stateful Functions">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/getting-started/with-flink/index.html b/content/getting-started/with-flink/index.html
index 000430e..043a42f 100644
--- a/content/getting-started/with-flink/index.html
+++ b/content/getting-started/with-flink/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/getting-started/with-flink/" title="With Flink">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/how-to-contribute/code-style-and-quality-common/index.html b/content/how-to-contribute/code-style-and-quality-common/index.html
index e9e0dc8..f4b4b13 100644
--- a/content/how-to-contribute/code-style-and-quality-common/index.html
+++ b/content/how-to-contribute/code-style-and-quality-common/index.html
@@ -30,7 +30,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/how-to-contribute/code-style-and-quality-common/" title="Code Style and Quality Guide — Common Rules">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -481,33 +481,33 @@
   Code Style and Quality Guide — Common Rules
   <a class="anchor" href="#code-style-and-quality-guide--common-rules">#</a>
 </h1>
-<h4 id="preamblehahahugoshortcode62s0hbhb">
+<h4 id="preamblehahahugoshortcode57s0hbhb">
   <a href="/how-to-contribute/code-style-and-quality-preamble/">Preamble</a>
-  <a class="anchor" href="#preamblehahahugoshortcode62s0hbhb">#</a>
+  <a class="anchor" href="#preamblehahahugoshortcode57s0hbhb">#</a>
 </h4>
-<h4 id="pull-requests--changeshahahugoshortcode62s1hbhb">
+<h4 id="pull-requests--changeshahahugoshortcode57s1hbhb">
   <a href="/how-to-contribute/code-style-and-quality-pull-requests/">Pull Requests &amp; Changes</a>
-  <a class="anchor" href="#pull-requests--changeshahahugoshortcode62s1hbhb">#</a>
+  <a class="anchor" href="#pull-requests--changeshahahugoshortcode57s1hbhb">#</a>
 </h4>
-<h4 id="common-coding-guidehahahugoshortcode62s2hbhb">
+<h4 id="common-coding-guidehahahugoshortcode57s2hbhb">
   <a href="/how-to-contribute/code-style-and-quality-common/">Common Coding Guide</a>
-  <a class="anchor" href="#common-coding-guidehahahugoshortcode62s2hbhb">#</a>
+  <a class="anchor" href="#common-coding-guidehahahugoshortcode57s2hbhb">#</a>
 </h4>
-<h4 id="java-language-guidehahahugoshortcode62s3hbhb">
+<h4 id="java-language-guidehahahugoshortcode57s3hbhb">
   <a href="/how-to-contribute/code-style-and-quality-java/">Java Language Guide</a>
-  <a class="anchor" href="#java-language-guidehahahugoshortcode62s3hbhb">#</a>
+  <a class="anchor" href="#java-language-guidehahahugoshortcode57s3hbhb">#</a>
 </h4>
-<h4 id="scala-language-guidehahahugoshortcode62s4hbhb">
+<h4 id="scala-language-guidehahahugoshortcode57s4hbhb">
   <a href="/how-to-contribute/code-style-and-quality-scala/">Scala Language Guide</a>
-  <a class="anchor" href="#scala-language-guidehahahugoshortcode62s4hbhb">#</a>
+  <a class="anchor" href="#scala-language-guidehahahugoshortcode57s4hbhb">#</a>
 </h4>
-<h4 id="components-guidehahahugoshortcode62s5hbhb">
+<h4 id="components-guidehahahugoshortcode57s5hbhb">
   <a href="/how-to-contribute/code-style-and-quality-components/">Components Guide</a>
-  <a class="anchor" href="#components-guidehahahugoshortcode62s5hbhb">#</a>
+  <a class="anchor" href="#components-guidehahahugoshortcode57s5hbhb">#</a>
 </h4>
-<h4 id="formatting-guidehahahugoshortcode62s6hbhb">
+<h4 id="formatting-guidehahahugoshortcode57s6hbhb">
   <a href="/how-to-contribute/code-style-and-quality-formatting/">Formatting Guide</a>
-  <a class="anchor" href="#formatting-guidehahahugoshortcode62s6hbhb">#</a>
+  <a class="anchor" href="#formatting-guidehahahugoshortcode57s6hbhb">#</a>
 </h4>
 <hr>
 <h2 id="1-copyright">
diff --git a/content/how-to-contribute/code-style-and-quality-components/index.html b/content/how-to-contribute/code-style-and-quality-components/index.html
index 007ad6f..8d405ac 100644
--- a/content/how-to-contribute/code-style-and-quality-components/index.html
+++ b/content/how-to-contribute/code-style-and-quality-components/index.html
@@ -32,7 +32,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/how-to-contribute/code-style-and-quality-components/" title="Apache Flink 代码样式和质量指南 — 组件">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -483,33 +483,33 @@
   Code Style and Quality Guide — Components Guide
   <a class="anchor" href="#code-style-and-quality-guide--components-guide">#</a>
 </h1>
-<h4 id="preamblehahahugoshortcode75s0hbhb">
+<h4 id="preamblehahahugoshortcode72s0hbhb">
   <a href="/how-to-contribute/code-style-and-quality-preamble/">Preamble</a>
-  <a class="anchor" href="#preamblehahahugoshortcode75s0hbhb">#</a>
+  <a class="anchor" href="#preamblehahahugoshortcode72s0hbhb">#</a>
 </h4>
-<h4 id="pull-requests--changeshahahugoshortcode75s1hbhb">
+<h4 id="pull-requests--changeshahahugoshortcode72s1hbhb">
   <a href="/how-to-contribute/code-style-and-quality-pull-requests/">Pull Requests &amp; Changes</a>
-  <a class="anchor" href="#pull-requests--changeshahahugoshortcode75s1hbhb">#</a>
+  <a class="anchor" href="#pull-requests--changeshahahugoshortcode72s1hbhb">#</a>
 </h4>
-<h4 id="common-coding-guidehahahugoshortcode75s2hbhb">
+<h4 id="common-coding-guidehahahugoshortcode72s2hbhb">
   <a href="/how-to-contribute/code-style-and-quality-common/">Common Coding Guide</a>
-  <a class="anchor" href="#common-coding-guidehahahugoshortcode75s2hbhb">#</a>
+  <a class="anchor" href="#common-coding-guidehahahugoshortcode72s2hbhb">#</a>
 </h4>
-<h4 id="java-language-guidehahahugoshortcode75s3hbhb">
+<h4 id="java-language-guidehahahugoshortcode72s3hbhb">
   <a href="/how-to-contribute/code-style-and-quality-java/">Java Language Guide</a>
-  <a class="anchor" href="#java-language-guidehahahugoshortcode75s3hbhb">#</a>
+  <a class="anchor" href="#java-language-guidehahahugoshortcode72s3hbhb">#</a>
 </h4>
-<h4 id="scala-language-guidehahahugoshortcode75s4hbhb">
+<h4 id="scala-language-guidehahahugoshortcode72s4hbhb">
   <a href="/how-to-contribute/code-style-and-quality-scala/">Scala Language Guide</a>
-  <a class="anchor" href="#scala-language-guidehahahugoshortcode75s4hbhb">#</a>
+  <a class="anchor" href="#scala-language-guidehahahugoshortcode72s4hbhb">#</a>
 </h4>
-<h4 id="components-guidehahahugoshortcode75s5hbhb">
+<h4 id="components-guidehahahugoshortcode72s5hbhb">
   <a href="/how-to-contribute/code-style-and-quality-components/">Components Guide</a>
-  <a class="anchor" href="#components-guidehahahugoshortcode75s5hbhb">#</a>
+  <a class="anchor" href="#components-guidehahahugoshortcode72s5hbhb">#</a>
 </h4>
-<h4 id="formatting-guidehahahugoshortcode75s6hbhb">
+<h4 id="formatting-guidehahahugoshortcode72s6hbhb">
   <a href="/how-to-contribute/code-style-and-quality-formatting/">Formatting Guide</a>
-  <a class="anchor" href="#formatting-guidehahahugoshortcode75s6hbhb">#</a>
+  <a class="anchor" href="#formatting-guidehahahugoshortcode72s6hbhb">#</a>
 </h4>
 <h2 id="component-specific-guidelines">
   Component Specific Guidelines
diff --git a/content/how-to-contribute/code-style-and-quality-formatting/index.html b/content/how-to-contribute/code-style-and-quality-formatting/index.html
index b65b785..e342ed6 100644
--- a/content/how-to-contribute/code-style-and-quality-formatting/index.html
+++ b/content/how-to-contribute/code-style-and-quality-formatting/index.html
@@ -30,7 +30,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/how-to-contribute/code-style-and-quality-formatting/" title="Code Style and Quality Guide — Formatting Guide">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -481,33 +481,33 @@
   Code Style and Quality Guide — Formatting Guide
   <a class="anchor" href="#code-style-and-quality-guide--formatting-guide">#</a>
 </h1>
-<h4 id="preamblehahahugoshortcode65s0hbhb">
+<h4 id="preamblehahahugoshortcode70s0hbhb">
   <a href="/how-to-contribute/code-style-and-quality-preamble/">Preamble</a>
-  <a class="anchor" href="#preamblehahahugoshortcode65s0hbhb">#</a>
+  <a class="anchor" href="#preamblehahahugoshortcode70s0hbhb">#</a>
 </h4>
-<h4 id="pull-requests--changeshahahugoshortcode65s1hbhb">
+<h4 id="pull-requests--changeshahahugoshortcode70s1hbhb">
   <a href="/how-to-contribute/code-style-and-quality-pull-requests/">Pull Requests &amp; Changes</a>
-  <a class="anchor" href="#pull-requests--changeshahahugoshortcode65s1hbhb">#</a>
+  <a class="anchor" href="#pull-requests--changeshahahugoshortcode70s1hbhb">#</a>
 </h4>
-<h4 id="common-coding-guidehahahugoshortcode65s2hbhb">
+<h4 id="common-coding-guidehahahugoshortcode70s2hbhb">
   <a href="/how-to-contribute/code-style-and-quality-common/">Common Coding Guide</a>
-  <a class="anchor" href="#common-coding-guidehahahugoshortcode65s2hbhb">#</a>
+  <a class="anchor" href="#common-coding-guidehahahugoshortcode70s2hbhb">#</a>
 </h4>
-<h4 id="java-language-guidehahahugoshortcode65s3hbhb">
+<h4 id="java-language-guidehahahugoshortcode70s3hbhb">
   <a href="/how-to-contribute/code-style-and-quality-java/">Java Language Guide</a>
-  <a class="anchor" href="#java-language-guidehahahugoshortcode65s3hbhb">#</a>
+  <a class="anchor" href="#java-language-guidehahahugoshortcode70s3hbhb">#</a>
 </h4>
-<h4 id="scala-language-guidehahahugoshortcode65s4hbhb">
+<h4 id="scala-language-guidehahahugoshortcode70s4hbhb">
   <a href="/how-to-contribute/code-style-and-quality-scala/">Scala Language Guide</a>
-  <a class="anchor" href="#scala-language-guidehahahugoshortcode65s4hbhb">#</a>
+  <a class="anchor" href="#scala-language-guidehahahugoshortcode70s4hbhb">#</a>
 </h4>
-<h4 id="components-guidehahahugoshortcode65s5hbhb">
+<h4 id="components-guidehahahugoshortcode70s5hbhb">
   <a href="/how-to-contribute/code-style-and-quality-components/">Components Guide</a>
-  <a class="anchor" href="#components-guidehahahugoshortcode65s5hbhb">#</a>
+  <a class="anchor" href="#components-guidehahahugoshortcode70s5hbhb">#</a>
 </h4>
-<h4 id="formatting-guidehahahugoshortcode65s6hbhb">
+<h4 id="formatting-guidehahahugoshortcode70s6hbhb">
   <a href="/how-to-contribute/code-style-and-quality-formatting/">Formatting Guide</a>
-  <a class="anchor" href="#formatting-guidehahahugoshortcode65s6hbhb">#</a>
+  <a class="anchor" href="#formatting-guidehahahugoshortcode70s6hbhb">#</a>
 </h4>
 <h2 id="java-code-formatting-style">
   Java Code Formatting Style
diff --git a/content/how-to-contribute/code-style-and-quality-java/index.html b/content/how-to-contribute/code-style-and-quality-java/index.html
index f0ecddb..ea78d4a 100644
--- a/content/how-to-contribute/code-style-and-quality-java/index.html
+++ b/content/how-to-contribute/code-style-and-quality-java/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/how-to-contribute/code-style-and-quality-java/" title="Code Style and Quality Guide — Java">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -479,33 +479,33 @@
   Code Style and Quality Guide — Java
   <a class="anchor" href="#code-style-and-quality-guide--java">#</a>
 </h1>
-<h4 id="preamblehahahugoshortcode60s0hbhb">
+<h4 id="preamblehahahugoshortcode71s0hbhb">
   <a href="/how-to-contribute/code-style-and-quality-preamble/">Preamble</a>
-  <a class="anchor" href="#preamblehahahugoshortcode60s0hbhb">#</a>
+  <a class="anchor" href="#preamblehahahugoshortcode71s0hbhb">#</a>
 </h4>
-<h4 id="pull-requests--changeshahahugoshortcode60s1hbhb">
+<h4 id="pull-requests--changeshahahugoshortcode71s1hbhb">
   <a href="/how-to-contribute/code-style-and-quality-pull-requests/">Pull Requests &amp; Changes</a>
-  <a class="anchor" href="#pull-requests--changeshahahugoshortcode60s1hbhb">#</a>
+  <a class="anchor" href="#pull-requests--changeshahahugoshortcode71s1hbhb">#</a>
 </h4>
-<h4 id="common-coding-guidehahahugoshortcode60s2hbhb">
+<h4 id="common-coding-guidehahahugoshortcode71s2hbhb">
   <a href="/how-to-contribute/code-style-and-quality-common/">Common Coding Guide</a>
-  <a class="anchor" href="#common-coding-guidehahahugoshortcode60s2hbhb">#</a>
+  <a class="anchor" href="#common-coding-guidehahahugoshortcode71s2hbhb">#</a>
 </h4>
-<h4 id="java-language-guidehahahugoshortcode60s3hbhb">
+<h4 id="java-language-guidehahahugoshortcode71s3hbhb">
   <a href="/how-to-contribute/code-style-and-quality-java/">Java Language Guide</a>
-  <a class="anchor" href="#java-language-guidehahahugoshortcode60s3hbhb">#</a>
+  <a class="anchor" href="#java-language-guidehahahugoshortcode71s3hbhb">#</a>
 </h4>
-<h4 id="scala-language-guidehahahugoshortcode60s4hbhb">
+<h4 id="scala-language-guidehahahugoshortcode71s4hbhb">
   <a href="/how-to-contribute/code-style-and-quality-scala/">Scala Language Guide</a>
-  <a class="anchor" href="#scala-language-guidehahahugoshortcode60s4hbhb">#</a>
+  <a class="anchor" href="#scala-language-guidehahahugoshortcode71s4hbhb">#</a>
 </h4>
-<h4 id="components-guidehahahugoshortcode60s5hbhb">
+<h4 id="components-guidehahahugoshortcode71s5hbhb">
   <a href="/how-to-contribute/code-style-and-quality-components/">Components Guide</a>
-  <a class="anchor" href="#components-guidehahahugoshortcode60s5hbhb">#</a>
+  <a class="anchor" href="#components-guidehahahugoshortcode71s5hbhb">#</a>
 </h4>
-<h4 id="formatting-guidehahahugoshortcode60s6hbhb">
+<h4 id="formatting-guidehahahugoshortcode71s6hbhb">
   <a href="/how-to-contribute/code-style-and-quality-formatting/">Formatting Guide</a>
-  <a class="anchor" href="#formatting-guidehahahugoshortcode60s6hbhb">#</a>
+  <a class="anchor" href="#formatting-guidehahahugoshortcode71s6hbhb">#</a>
 </h4>
 <h2 id="java-language-features-and-libraries">
   Java Language Features and Libraries
diff --git a/content/how-to-contribute/code-style-and-quality-preamble/index.html b/content/how-to-contribute/code-style-and-quality-preamble/index.html
index 26e21ef..ed4ae42 100644
--- a/content/how-to-contribute/code-style-and-quality-preamble/index.html
+++ b/content/how-to-contribute/code-style-and-quality-preamble/index.html
@@ -30,7 +30,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/how-to-contribute/code-style-and-quality-preamble/" title="代码样式与质量指南">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -481,33 +481,33 @@
   Apache Flink Code Style and Quality Guide
   <a class="anchor" href="#apache-flink-code-style-and-quality-guide">#</a>
 </h1>
-<h4 id="preamblehahahugoshortcode63s0hbhb">
+<h4 id="preamblehahahugoshortcode74s0hbhb">
   <a href="/how-to-contribute/code-style-and-quality-preamble/">Preamble</a>
-  <a class="anchor" href="#preamblehahahugoshortcode63s0hbhb">#</a>
+  <a class="anchor" href="#preamblehahahugoshortcode74s0hbhb">#</a>
 </h4>
-<h4 id="pull-requests--changeshahahugoshortcode63s1hbhb">
+<h4 id="pull-requests--changeshahahugoshortcode74s1hbhb">
   <a href="/how-to-contribute/code-style-and-quality-pull-requests/">Pull Requests &amp; Changes</a>
-  <a class="anchor" href="#pull-requests--changeshahahugoshortcode63s1hbhb">#</a>
+  <a class="anchor" href="#pull-requests--changeshahahugoshortcode74s1hbhb">#</a>
 </h4>
-<h4 id="common-coding-guidehahahugoshortcode63s2hbhb">
+<h4 id="common-coding-guidehahahugoshortcode74s2hbhb">
   <a href="/how-to-contribute/code-style-and-quality-common/">Common Coding Guide</a>
-  <a class="anchor" href="#common-coding-guidehahahugoshortcode63s2hbhb">#</a>
+  <a class="anchor" href="#common-coding-guidehahahugoshortcode74s2hbhb">#</a>
 </h4>
-<h4 id="java-language-guidehahahugoshortcode63s3hbhb">
+<h4 id="java-language-guidehahahugoshortcode74s3hbhb">
   <a href="/how-to-contribute/code-style-and-quality-java/">Java Language Guide</a>
-  <a class="anchor" href="#java-language-guidehahahugoshortcode63s3hbhb">#</a>
+  <a class="anchor" href="#java-language-guidehahahugoshortcode74s3hbhb">#</a>
 </h4>
-<h4 id="scala-language-guidehahahugoshortcode63s4hbhb">
+<h4 id="scala-language-guidehahahugoshortcode74s4hbhb">
   <a href="/how-to-contribute/code-style-and-quality-scala/">Scala Language Guide</a>
-  <a class="anchor" href="#scala-language-guidehahahugoshortcode63s4hbhb">#</a>
+  <a class="anchor" href="#scala-language-guidehahahugoshortcode74s4hbhb">#</a>
 </h4>
-<h4 id="components-guidehahahugoshortcode63s5hbhb">
+<h4 id="components-guidehahahugoshortcode74s5hbhb">
   <a href="/how-to-contribute/code-style-and-quality-components/">Components Guide</a>
-  <a class="anchor" href="#components-guidehahahugoshortcode63s5hbhb">#</a>
+  <a class="anchor" href="#components-guidehahahugoshortcode74s5hbhb">#</a>
 </h4>
-<h4 id="formatting-guidehahahugoshortcode63s6hbhb">
+<h4 id="formatting-guidehahahugoshortcode74s6hbhb">
   <a href="/how-to-contribute/code-style-and-quality-formatting/">Formatting Guide</a>
-  <a class="anchor" href="#formatting-guidehahahugoshortcode63s6hbhb">#</a>
+  <a class="anchor" href="#formatting-guidehahahugoshortcode74s6hbhb">#</a>
 </h4>
 <hr>
 <p>This is an attempt to capture the code and quality standard that we want to maintain.</p>
diff --git a/content/how-to-contribute/code-style-and-quality-pull-requests/index.html b/content/how-to-contribute/code-style-and-quality-pull-requests/index.html
index 43904e6..720f08c 100644
--- a/content/how-to-contribute/code-style-and-quality-pull-requests/index.html
+++ b/content/how-to-contribute/code-style-and-quality-pull-requests/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/how-to-contribute/code-style-and-quality-pull-requests/" title="Code Style and Quality Guide — Pull Requests & Changes">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -479,33 +479,33 @@
   Code Style and Quality Guide — Pull Requests &amp; Changes
   <a class="anchor" href="#code-style-and-quality-guide--pull-requests--changes">#</a>
 </h1>
-<h4 id="preamblehahahugoshortcode66s0hbhb">
+<h4 id="preamblehahahugoshortcode58s0hbhb">
   <a href="/how-to-contribute/code-style-and-quality-preamble/">Preamble</a>
-  <a class="anchor" href="#preamblehahahugoshortcode66s0hbhb">#</a>
+  <a class="anchor" href="#preamblehahahugoshortcode58s0hbhb">#</a>
 </h4>
-<h4 id="pull-requests--changeshahahugoshortcode66s1hbhb">
+<h4 id="pull-requests--changeshahahugoshortcode58s1hbhb">
   <a href="/how-to-contribute/code-style-and-quality-pull-requests/">Pull Requests &amp; Changes</a>
-  <a class="anchor" href="#pull-requests--changeshahahugoshortcode66s1hbhb">#</a>
+  <a class="anchor" href="#pull-requests--changeshahahugoshortcode58s1hbhb">#</a>
 </h4>
-<h4 id="common-coding-guidehahahugoshortcode66s2hbhb">
+<h4 id="common-coding-guidehahahugoshortcode58s2hbhb">
   <a href="/how-to-contribute/code-style-and-quality-common/">Common Coding Guide</a>
-  <a class="anchor" href="#common-coding-guidehahahugoshortcode66s2hbhb">#</a>
+  <a class="anchor" href="#common-coding-guidehahahugoshortcode58s2hbhb">#</a>
 </h4>
-<h4 id="java-language-guidehahahugoshortcode66s3hbhb">
+<h4 id="java-language-guidehahahugoshortcode58s3hbhb">
   <a href="/how-to-contribute/code-style-and-quality-java/">Java Language Guide</a>
-  <a class="anchor" href="#java-language-guidehahahugoshortcode66s3hbhb">#</a>
+  <a class="anchor" href="#java-language-guidehahahugoshortcode58s3hbhb">#</a>
 </h4>
-<h4 id="scala-language-guidehahahugoshortcode66s4hbhb">
+<h4 id="scala-language-guidehahahugoshortcode58s4hbhb">
   <a href="/how-to-contribute/code-style-and-quality-scala/">Scala Language Guide</a>
-  <a class="anchor" href="#scala-language-guidehahahugoshortcode66s4hbhb">#</a>
+  <a class="anchor" href="#scala-language-guidehahahugoshortcode58s4hbhb">#</a>
 </h4>
-<h4 id="components-guidehahahugoshortcode66s5hbhb">
+<h4 id="components-guidehahahugoshortcode58s5hbhb">
   <a href="/how-to-contribute/code-style-and-quality-components/">Components Guide</a>
-  <a class="anchor" href="#components-guidehahahugoshortcode66s5hbhb">#</a>
+  <a class="anchor" href="#components-guidehahahugoshortcode58s5hbhb">#</a>
 </h4>
-<h4 id="formatting-guidehahahugoshortcode66s6hbhb">
+<h4 id="formatting-guidehahahugoshortcode58s6hbhb">
   <a href="/how-to-contribute/code-style-and-quality-formatting/">Formatting Guide</a>
-  <a class="anchor" href="#formatting-guidehahahugoshortcode66s6hbhb">#</a>
+  <a class="anchor" href="#formatting-guidehahahugoshortcode58s6hbhb">#</a>
 </h4>
 <hr>
 <p><strong>Rationale:</strong> We ask contributors to put in a little bit of extra effort to bring pull requests into a state that they can be more easily and more thoroughly reviewed. This helps the community in many ways:</p>
diff --git a/content/how-to-contribute/code-style-and-quality-scala/index.html b/content/how-to-contribute/code-style-and-quality-scala/index.html
index 935e6cd..495556f 100644
--- a/content/how-to-contribute/code-style-and-quality-scala/index.html
+++ b/content/how-to-contribute/code-style-and-quality-scala/index.html
@@ -30,7 +30,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/how-to-contribute/code-style-and-quality-scala/" title="Code Style and Quality Guide — Scala">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -481,33 +481,33 @@
   Code Style and Quality Guide — Scala
   <a class="anchor" href="#code-style-and-quality-guide--scala">#</a>
 </h1>
-<h4 id="preamblehahahugoshortcode68s0hbhb">
+<h4 id="preamblehahahugoshortcode60s0hbhb">
   <a href="/how-to-contribute/code-style-and-quality-preamble/">Preamble</a>
-  <a class="anchor" href="#preamblehahahugoshortcode68s0hbhb">#</a>
+  <a class="anchor" href="#preamblehahahugoshortcode60s0hbhb">#</a>
 </h4>
-<h4 id="pull-requests--changeshahahugoshortcode68s1hbhb">
+<h4 id="pull-requests--changeshahahugoshortcode60s1hbhb">
   <a href="/how-to-contribute/code-style-and-quality-pull-requests/">Pull Requests &amp; Changes</a>
-  <a class="anchor" href="#pull-requests--changeshahahugoshortcode68s1hbhb">#</a>
+  <a class="anchor" href="#pull-requests--changeshahahugoshortcode60s1hbhb">#</a>
 </h4>
-<h4 id="common-coding-guidehahahugoshortcode68s2hbhb">
+<h4 id="common-coding-guidehahahugoshortcode60s2hbhb">
   <a href="/how-to-contribute/code-style-and-quality-common/">Common Coding Guide</a>
-  <a class="anchor" href="#common-coding-guidehahahugoshortcode68s2hbhb">#</a>
+  <a class="anchor" href="#common-coding-guidehahahugoshortcode60s2hbhb">#</a>
 </h4>
-<h4 id="java-language-guidehahahugoshortcode68s3hbhb">
+<h4 id="java-language-guidehahahugoshortcode60s3hbhb">
   <a href="/how-to-contribute/code-style-and-quality-java/">Java Language Guide</a>
-  <a class="anchor" href="#java-language-guidehahahugoshortcode68s3hbhb">#</a>
+  <a class="anchor" href="#java-language-guidehahahugoshortcode60s3hbhb">#</a>
 </h4>
-<h4 id="scala-language-guidehahahugoshortcode68s4hbhb">
+<h4 id="scala-language-guidehahahugoshortcode60s4hbhb">
   <a href="/how-to-contribute/code-style-and-quality-scala/">Scala Language Guide</a>
-  <a class="anchor" href="#scala-language-guidehahahugoshortcode68s4hbhb">#</a>
+  <a class="anchor" href="#scala-language-guidehahahugoshortcode60s4hbhb">#</a>
 </h4>
-<h4 id="components-guidehahahugoshortcode68s5hbhb">
+<h4 id="components-guidehahahugoshortcode60s5hbhb">
   <a href="/how-to-contribute/code-style-and-quality-components/">Components Guide</a>
-  <a class="anchor" href="#components-guidehahahugoshortcode68s5hbhb">#</a>
+  <a class="anchor" href="#components-guidehahahugoshortcode60s5hbhb">#</a>
 </h4>
-<h4 id="formatting-guidehahahugoshortcode68s6hbhb">
+<h4 id="formatting-guidehahahugoshortcode60s6hbhb">
   <a href="/how-to-contribute/code-style-and-quality-formatting/">Formatting Guide</a>
-  <a class="anchor" href="#formatting-guidehahahugoshortcode68s6hbhb">#</a>
+  <a class="anchor" href="#formatting-guidehahahugoshortcode60s6hbhb">#</a>
 </h4>
 <h2 id="scala-language-features">
   Scala Language Features
diff --git a/content/how-to-contribute/contribute-code/index.html b/content/how-to-contribute/contribute-code/index.html
index 0cf1923..40f0d59 100644
--- a/content/how-to-contribute/contribute-code/index.html
+++ b/content/how-to-contribute/contribute-code/index.html
@@ -30,7 +30,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/how-to-contribute/contribute-code/" title="贡献代码">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/how-to-contribute/contribute-documentation/index.html b/content/how-to-contribute/contribute-documentation/index.html
index a748db9..88fe1f5 100644
--- a/content/how-to-contribute/contribute-documentation/index.html
+++ b/content/how-to-contribute/contribute-documentation/index.html
@@ -30,7 +30,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/how-to-contribute/contribute-documentation/" title="贡献文档">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/how-to-contribute/documentation-style-guide/index.html b/content/how-to-contribute/documentation-style-guide/index.html
index 3b1c89d..2d19c6f 100644
--- a/content/how-to-contribute/documentation-style-guide/index.html
+++ b/content/how-to-contribute/documentation-style-guide/index.html
@@ -30,7 +30,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/how-to-contribute/documentation-style-guide/" title="文档样式指南">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/how-to-contribute/getting-help/index.html b/content/how-to-contribute/getting-help/index.html
index e3e9189..5292869 100644
--- a/content/how-to-contribute/getting-help/index.html
+++ b/content/how-to-contribute/getting-help/index.html
@@ -32,7 +32,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/how-to-contribute/getting-help/" title="获取帮助">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/how-to-contribute/improve-website/index.html b/content/how-to-contribute/improve-website/index.html
index a67b629..e5e5dd8 100644
--- a/content/how-to-contribute/improve-website/index.html
+++ b/content/how-to-contribute/improve-website/index.html
@@ -32,7 +32,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/how-to-contribute/improve-website/" title="贡献网站">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/how-to-contribute/index.html b/content/how-to-contribute/index.html
index 378ac4d..b59cdc1 100644
--- a/content/how-to-contribute/index.html
+++ b/content/how-to-contribute/index.html
@@ -26,7 +26,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/how-to-contribute/" title="How to Contribute">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/how-to-contribute/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -281,7 +281,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/how-to-contribute/overview/index.html b/content/how-to-contribute/overview/index.html
index 3d94a0c..fc06934 100644
--- a/content/how-to-contribute/overview/index.html
+++ b/content/how-to-contribute/overview/index.html
@@ -30,7 +30,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/how-to-contribute/overview/" title="如何参与贡献">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/how-to-contribute/reviewing-prs/index.html b/content/how-to-contribute/reviewing-prs/index.html
index 832a118..029beab 100644
--- a/content/how-to-contribute/reviewing-prs/index.html
+++ b/content/how-to-contribute/reviewing-prs/index.html
@@ -30,7 +30,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/how-to-contribute/reviewing-prs/" title="审核 Pull Request">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/index.html b/content/index.html
index f034a72..842ddf9 100644
--- a/content/index.html
+++ b/content/index.html
@@ -15,7 +15,7 @@
 
   <meta charset="UTF-8">
 <meta name="viewport" content="width=device-width, initial-scale=1.0">
-<meta name="description" content="Recent Flink blogs Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades July 31, 2025 - Ron Liu. The Apache Flink PMC is proud to announce the release of Apache Flink 2.1.0. This marks a significant milestone in the evolution of the real-time data processing engine into a unified Data &#43; AI … Continue reading Apache Flink 1.19.3 Release Announcement July 10, 2025 - Ferenc Csaky.">
+<meta name="description" content="Recent Flink blogs Apache Flink CDC 3.5.0 Release Announcement September 26, 2025 - Yanquan Lv. The Apache Flink Community is excited to announce the release of Flink CDC 3.5.0! This release introduces new pipeline connectors for Apache Fluss and PostgreSQL, and improves usability in … Continue reading Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades July 31, 2025 - Ron Liu.">
 <meta name="theme-color" content="#FFFFFF"><meta property="og:title" content="Apache Flink® — Stateful Computations over Data Streams" />
 <meta property="og:description" content="" />
 <meta property="og:type" content="website" />
@@ -26,7 +26,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/" title="Apache Flink Documentation">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -281,7 +281,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -633,6 +633,23 @@
     
         
           <div class="card">
+            <div class="heading">Apache Flink CDC 3.5.0 Release Announcement</div>
+            <div class="body">
+                <p>
+                  September 26, 2025 - 
+                  
+                    Yanquan Lv.
+                  
+                </p>
+                <p class="truncate">
+                  The Apache Flink Community is excited to announce the release of Flink CDC 3.5.0!
+This release introduces new pipeline connectors for Apache Fluss and PostgreSQL, and improves usability in …
+                </p>
+                <a href="/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Continue reading</a>
+            </div>
+          </div>
+        
+          <div class="card">
             <div class="heading">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data + AI with Comprehensive Upgrades</div>
             <div class="body">
                 <p>
@@ -665,23 +682,6 @@
             </div>
           </div>
         
-          <div class="card">
-            <div class="heading">Apache Flink 1.20.2 Release Announcement</div>
-            <div class="body">
-                <p>
-                  July 10, 2025 - 
-                  
-                    Ferenc Csaky.
-                  
-                </p>
-                <p class="truncate">
-                  The Apache Flink Community is pleased to announce the second bug fix release of the Flink 1.20 series.
-This release includes 25 bug fixes, vulnerability fixes, and minor improvements for Flink 1.20. …
-                </p>
-                <a href="/2025/07/10/apache-flink-1.20.2-release-announcement/">Continue reading</a>
-            </div>
-          </div>
-        
     
         
     
diff --git a/content/index.xml b/content/index.xml
index b9db1bb..3880606 100644
--- a/content/index.xml
+++ b/content/index.xml
@@ -6,7 +6,7 @@
     <description>Recent content in Apache Flink® — Stateful Computations over Data Streams on Apache Flink</description>
     <generator>Hugo -- gohugo.io</generator>
     <language>en</language>
-    <lastBuildDate>Thu, 31 Jul 2025 00:00:00 +0000</lastBuildDate>
+    <lastBuildDate>Fri, 26 Sep 2025 08:00:00 +0000</lastBuildDate>
     <atom:link href="https://flink.apache.org/index.xml" rel="self" type="application/rss+xml" />
     <item>
       <title>Architecture</title>
@@ -254,6 +254,13 @@
       <description>Getting Help # Having a Question? # The Apache Flink community answers many user questions every day. You can search for answers and advice in the archives or reach out to the community for help and guidance.&#xA;User Mailing List # Many Flink users, contributors, and committers are subscribed to Flink&amp;rsquo;s user mailing list. The user mailing list is a very good place to ask for help.&#xA;Before posting to the mailing list, you can search the mailing list archives for email threads that discuss issues related to yours on the following websites.</description>
     </item>
     <item>
+      <title>Apache Flink CDC 3.5.0 Release Announcement</title>
+      <link>https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/</link>
+      <pubDate>Fri, 26 Sep 2025 08:00:00 +0000</pubDate>
+      <guid>https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/</guid>
+      <description>The Apache Flink Community is excited to announce the release of Flink CDC 3.5.0!&#xA;This release introduces new pipeline connectors for Apache Fluss and PostgreSQL, and improves usability in multi-tables (with frequent table structure changes) synchronization scenario, many issues encountered in the transform and Schema evolution frameworks have also been fixed.&#xA;Flink CDC release packages are available at Releases Page, and documentations are available at Flink CDC documentation page. Looking forward to any feedback from the community through the Flink mailing lists or JIRA!</description>
+    </item>
+    <item>
       <title>Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</title>
       <link>https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/</link>
       <pubDate>Thu, 31 Jul 2025 00:00:00 +0000</pubDate>
diff --git a/content/material/index.html b/content/material/index.html
index db268c8..85a9771 100644
--- a/content/material/index.html
+++ b/content/material/index.html
@@ -32,7 +32,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/material/" title="素材">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/news/2025/09/26/release-cdc-3.5.0.html b/content/news/2025/09/26/release-cdc-3.5.0.html
new file mode 100644
index 0000000..307c7dc
--- /dev/null
+++ b/content/news/2025/09/26/release-cdc-3.5.0.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html>
+<html lang="en">
+  <head>
+    <title>https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/</title>
+    <link rel="canonical" href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">
+    <meta name="robots" content="noindex">
+    <meta charset="utf-8">
+    <meta http-equiv="refresh" content="0; url=https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">
+  </head>
+</html>
diff --git a/content/posts/index.html b/content/posts/index.html
index 4421dd2..5f5af4c 100644
--- a/content/posts/index.html
+++ b/content/posts/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,31 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </h3>
+    
+
+
+  September 26, 2025 -
+
+
+
+  Yanquan Lv
+
+
+
+
+    <p>The Apache Flink Community is excited to announce the release of Flink CDC 3.5.0!
+This release introduces new pipeline connectors for Apache Fluss and PostgreSQL, and improves usability in multi-tables (with frequent table structure changes) synchronization scenario, many issues encountered in the transform and Schema evolution frameworks have also been fixed.
+Flink CDC release packages are available at Releases Page, and documentations are available at Flink CDC documentation page. Looking forward to any feedback from the community through the Flink mailing lists or JIRA!
+        <a href="/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">...</a>
+      
+    </p>
+    <a href="/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </h3>
     
@@ -695,31 +720,6 @@
     <a href="/2025/02/12/apache-flink-1.19.2-release-announcement/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2025/02/12/apache-flink-1.20.1-release-announcement/">Apache Flink 1.20.1 Release Announcement</a>
-    </h3>
-    
-
-
-  February 12, 2025 -
-
-
-
-  Alexander Fedulov
-
-
-
-
-    <p>The Apache Flink Community is pleased to announce the first bug fix release of the Flink 1.20 series.
-This release includes 75 bug fixes, vulnerability fixes, and minor improvements for Flink 1.20. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
-We highly recommend all users upgrade to Flink 1.20.1.
-        <a href="/2025/02/12/apache-flink-1.20.1-release-announcement/">...</a>
-      
-    </p>
-    <a href="/2025/02/12/apache-flink-1.20.1-release-announcement/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -766,6 +766,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/index.xml b/content/posts/index.xml
index f3fa5f5..e42c851 100644
--- a/content/posts/index.xml
+++ b/content/posts/index.xml
@@ -6,9 +6,16 @@
     <description>Recent content in Flink Blog on Apache Flink</description>
     <generator>Hugo -- gohugo.io</generator>
     <language>en</language>
-    <lastBuildDate>Thu, 31 Jul 2025 00:00:00 +0000</lastBuildDate>
+    <lastBuildDate>Fri, 26 Sep 2025 08:00:00 +0000</lastBuildDate>
     <atom:link href="https://flink.apache.org/posts/index.xml" rel="self" type="application/rss+xml" />
     <item>
+      <title>Apache Flink CDC 3.5.0 Release Announcement</title>
+      <link>https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/</link>
+      <pubDate>Fri, 26 Sep 2025 08:00:00 +0000</pubDate>
+      <guid>https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/</guid>
+      <description>The Apache Flink Community is excited to announce the release of Flink CDC 3.5.0!&#xA;This release introduces new pipeline connectors for Apache Fluss and PostgreSQL, and improves usability in multi-tables (with frequent table structure changes) synchronization scenario, many issues encountered in the transform and Schema evolution frameworks have also been fixed.&#xA;Flink CDC release packages are available at Releases Page, and documentations are available at Flink CDC documentation page. Looking forward to any feedback from the community through the Flink mailing lists or JIRA!</description>
+    </item>
+    <item>
       <title>Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</title>
       <link>https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/</link>
       <pubDate>Thu, 31 Jul 2025 00:00:00 +0000</pubDate>
diff --git a/content/posts/page/10/index.html b/content/posts/page/10/index.html
index b5f8200..43f86b2 100644
--- a/content/posts/page/10/index.html
+++ b/content/posts/page/10/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,33 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2021/10/26/sort-based-blocking-shuffle-implementation-in-flink-part-two/">Sort-Based Blocking Shuffle Implementation in Flink - Part Two</a>
+    </h3>
+    
+
+
+  October 26, 2021 -
+
+
+
+  Yingjie Cao (Kevin)
+
+
+  Daisy Tsang
+
+
+
+
+    <p>Part one of this blog post explained the motivation behind introducing sort-based blocking shuffle, presented benchmark results, and provided guidelines on how to use this new feature.
+Like sort-merge shuffle implemented by other distributed data processing frameworks, the whole sort-based shuffle process in Flink consists of several important stages, including collecting data in memory, sorting the collected data in memory, spilling the sorted data to files, and reading the shuffle data from these spilled files.
+        <a href="/2021/10/26/sort-based-blocking-shuffle-implementation-in-flink-part-two/">...</a>
+      
+    </p>
+    <a href="/2021/10/26/sort-based-blocking-shuffle-implementation-in-flink-part-two/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2021/10/19/apache-flink-1.13.3-released/">Apache Flink 1.13.3 Released</a>
     </h3>
     
@@ -728,32 +755,6 @@
     <a href="/2021/08/06/apache-flink-1.13.2-released/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2021/07/07/how-to-identify-the-source-of-backpressure/">How to identify the source of backpressure?</a>
-    </h3>
-    
-
-
-  July 7, 2021 -
-
-
-
-  Piotr Nowojski
-
-  <a href="https://twitter.com/PiotrNowojski">(@PiotrNowojski)</a>
-  
-
-
-
-    <p>Backpressure monitoring in the web UI
-The backpressure topic was tackled from different angles over the last couple of years. However, when it comes to identifying and analyzing sources of backpressure, things have changed quite a bit in the recent Flink releases (especially with new additions to metrics and the web UI in Flink 1.13). This post will try to clarify some of these changes and go into more detail about how to track down the source of backpressure, but first&hellip;
-        <a href="/2021/07/07/how-to-identify-the-source-of-backpressure/">...</a>
-      
-    </p>
-    <a href="/2021/07/07/how-to-identify-the-source-of-backpressure/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -800,6 +801,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/11/index.html b/content/posts/page/11/index.html
index 1dc8d26..547aa63 100644
--- a/content/posts/page/11/index.html
+++ b/content/posts/page/11/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,32 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2021/07/07/how-to-identify-the-source-of-backpressure/">How to identify the source of backpressure?</a>
+    </h3>
+    
+
+
+  July 7, 2021 -
+
+
+
+  Piotr Nowojski
+
+  <a href="https://twitter.com/PiotrNowojski">(@PiotrNowojski)</a>
+  
+
+
+
+    <p>Backpressure monitoring in the web UI
+The backpressure topic was tackled from different angles over the last couple of years. However, when it comes to identifying and analyzing sources of backpressure, things have changed quite a bit in the recent Flink releases (especially with new additions to metrics and the web UI in Flink 1.13). This post will try to clarify some of these changes and go into more detail about how to track down the source of backpressure, but first&hellip;
+        <a href="/2021/07/07/how-to-identify-the-source-of-backpressure/">...</a>
+      
+    </p>
+    <a href="/2021/07/07/how-to-identify-the-source-of-backpressure/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2021/05/28/apache-flink-1.13.1-released/">Apache Flink 1.13.1 Released</a>
     </h3>
     
@@ -722,33 +748,6 @@
     <a href="/2021/02/10/how-to-natively-deploy-flink-on-kubernetes-with-high-availability-ha/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2021/01/29/apache-flink-1.10.3-released/">Apache Flink 1.10.3 Released</a>
-    </h3>
-    
-
-
-  January 29, 2021 -
-
-
-
-  Xintong Song
-
-
-
-
-    <p>The Apache Flink community released the third bugfix version of the Apache Flink 1.10 series.
-This release includes 36 fixes and minor improvements for Flink 1.10.2. The list below includes a detailed list of all fixes and improvements.
-We highly recommend all users to upgrade to Flink 1.10.3.
-Updated Maven dependencies:
-&lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.10.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.10.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.10.3&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
-        <a href="/2021/01/29/apache-flink-1.10.3-released/">...</a>
-      
-    </p>
-    <a href="/2021/01/29/apache-flink-1.10.3-released/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -795,6 +794,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/12/index.html b/content/posts/page/12/index.html
index 114787d..46442b3 100644
--- a/content/posts/page/12/index.html
+++ b/content/posts/page/12/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,33 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2021/01/29/apache-flink-1.10.3-released/">Apache Flink 1.10.3 Released</a>
+    </h3>
+    
+
+
+  January 29, 2021 -
+
+
+
+  Xintong Song
+
+
+
+
+    <p>The Apache Flink community released the third bugfix version of the Apache Flink 1.10 series.
+This release includes 36 fixes and minor improvements for Flink 1.10.2. The list below includes a detailed list of all fixes and improvements.
+We highly recommend all users to upgrade to Flink 1.10.3.
+Updated Maven dependencies:
+&lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.10.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.10.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.10.3&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
+        <a href="/2021/01/29/apache-flink-1.10.3-released/">...</a>
+      
+    </p>
+    <a href="/2021/01/29/apache-flink-1.10.3-released/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2021/01/19/apache-flink-1.12.1-released/">Apache Flink 1.12.1 Released</a>
     </h3>
     
@@ -713,33 +740,6 @@
     <a href="/2020/11/11/stateful-functions-2.2.1-release-announcement/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2020/10/15/from-aligned-to-unaligned-checkpoints-part-1-checkpoints-alignment-and-backpressure/">From Aligned to Unaligned Checkpoints - Part 1: Checkpoints, Alignment, and Backpressure</a>
-    </h3>
-    
-
-
-  October 15, 2020 -
-
-
-
-  Arvid Heise
-
-
-  Stephan Ewen
-
-
-
-
-    <p>Apache Flink’s checkpoint-based fault tolerance mechanism is one of its defining features. Because of that design, Flink unifies batch and stream processing, can easily scale to both very small and extremely large scenarios and provides support for many operational features like stateful upgrades with state evolution or roll-backs and time-travel.
-Despite all these great properties, Flink&rsquo;s checkpointing method has an Achilles Heel: the speed of a completed checkpoint is determined by the speed at which data flows through the application.
-        <a href="/2020/10/15/from-aligned-to-unaligned-checkpoints-part-1-checkpoints-alignment-and-backpressure/">...</a>
-      
-    </p>
-    <a href="/2020/10/15/from-aligned-to-unaligned-checkpoints-part-1-checkpoints-alignment-and-backpressure/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -786,6 +786,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/13/index.html b/content/posts/page/13/index.html
index b0ef9a1..f4b2158 100644
--- a/content/posts/page/13/index.html
+++ b/content/posts/page/13/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,33 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2020/10/15/from-aligned-to-unaligned-checkpoints-part-1-checkpoints-alignment-and-backpressure/">From Aligned to Unaligned Checkpoints - Part 1: Checkpoints, Alignment, and Backpressure</a>
+    </h3>
+    
+
+
+  October 15, 2020 -
+
+
+
+  Arvid Heise
+
+
+  Stephan Ewen
+
+
+
+
+    <p>Apache Flink’s checkpoint-based fault tolerance mechanism is one of its defining features. Because of that design, Flink unifies batch and stream processing, can easily scale to both very small and extremely large scenarios and provides support for many operational features like stateful upgrades with state evolution or roll-backs and time-travel.
+Despite all these great properties, Flink&rsquo;s checkpointing method has an Achilles Heel: the speed of a completed checkpoint is determined by the speed at which data flows through the application.
+        <a href="/2020/10/15/from-aligned-to-unaligned-checkpoints-part-1-checkpoints-alignment-and-backpressure/">...</a>
+      
+    </p>
+    <a href="/2020/10/15/from-aligned-to-unaligned-checkpoints-part-1-checkpoints-alignment-and-backpressure/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2020/10/13/stateful-functions-internals-behind-the-scenes-of-stateful-serverless/">Stateful Functions Internals: Behind the scenes of Stateful Serverless</a>
     </h3>
     
@@ -712,37 +739,6 @@
     <a href="/2020/08/06/accelerating-your-workload-with-gpu-and-other-external-resources/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2020/08/04/pyflink-the-integration-of-pandas-into-pyflink/">PyFlink: The integration of Pandas into PyFlink</a>
-    </h3>
-    
-
-
-  August 4, 2020 -
-
-
-
-  Jincheng Sun
-
-  <a href="https://twitter.com/sunjincheng121">(@sunjincheng121)</a>
-  
-
-  Markos Sfikas
-
-  <a href="https://twitter.com/MarkSfik">(@MarkSfik)</a>
-  
-
-
-
-    <p>Python has evolved into one of the most important programming languages for many fields of data processing. So big has been Python’s popularity, that it has pretty much become the default data processing language for data scientists. On top of that, there is a plethora of Python-based data processing tools such as NumPy, Pandas, and Scikit-learn that have gained additional popularity due to their flexibility or powerful functionalities.
-Pic source: VanderPlas 2017, slide 52.
-        <a href="/2020/08/04/pyflink-the-integration-of-pandas-into-pyflink/">...</a>
-      
-    </p>
-    <a href="/2020/08/04/pyflink-the-integration-of-pandas-into-pyflink/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -789,6 +785,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/14/index.html b/content/posts/page/14/index.html
index 537ff09..dca6ed9 100644
--- a/content/posts/page/14/index.html
+++ b/content/posts/page/14/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,37 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2020/08/04/pyflink-the-integration-of-pandas-into-pyflink/">PyFlink: The integration of Pandas into PyFlink</a>
+    </h3>
+    
+
+
+  August 4, 2020 -
+
+
+
+  Jincheng Sun
+
+  <a href="https://twitter.com/sunjincheng121">(@sunjincheng121)</a>
+  
+
+  Markos Sfikas
+
+  <a href="https://twitter.com/MarkSfik">(@MarkSfik)</a>
+  
+
+
+
+    <p>Python has evolved into one of the most important programming languages for many fields of data processing. So big has been Python’s popularity, that it has pretty much become the default data processing language for data scientists. On top of that, there is a plethora of Python-based data processing tools such as NumPy, Pandas, and Scikit-learn that have gained additional popularity due to their flexibility or powerful functionalities.
+Pic source: VanderPlas 2017, slide 52.
+        <a href="/2020/08/04/pyflink-the-integration-of-pandas-into-pyflink/">...</a>
+      
+    </p>
+    <a href="/2020/08/04/pyflink-the-integration-of-pandas-into-pyflink/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2020/07/30/advanced-flink-application-patterns-vol.3-custom-window-processing/">Advanced Flink Application Patterns Vol.3: Custom Window Processing</a>
     </h3>
     
@@ -708,32 +739,6 @@
     <a href="/2020/06/15/flink-on-zeppelin-notebooks-for-interactive-data-analysis-part-1/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2020/06/10/flink-community-update-june20/">Flink Community Update - June&#39;20</a>
-    </h3>
-    
-
-
-  June 10, 2020 -
-
-
-
-  Marta Paes
-
-  <a href="https://twitter.com/morsapaes">(@morsapaes)</a>
-  
-
-
-
-    <p>And suddenly it’s June. The previous month has been calm on the surface, but quite hectic underneath — the final testing phase for Flink 1.11 is moving at full speed, Stateful Functions 2.1 is out in the wild and Flink has made it into Google Season of Docs 2020.
-To top it off, a piece of good news: Flink Forward is back on October 19-22 as a free virtual event!
-        <a href="/2020/06/10/flink-community-update-june20/">...</a>
-      
-    </p>
-    <a href="/2020/06/10/flink-community-update-june20/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -780,6 +785,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/15/index.html b/content/posts/page/15/index.html
index fd7be02..b8e8578 100644
--- a/content/posts/page/15/index.html
+++ b/content/posts/page/15/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,32 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2020/06/10/flink-community-update-june20/">Flink Community Update - June&#39;20</a>
+    </h3>
+    
+
+
+  June 10, 2020 -
+
+
+
+  Marta Paes
+
+  <a href="https://twitter.com/morsapaes">(@morsapaes)</a>
+  
+
+
+
+    <p>And suddenly it’s June. The previous month has been calm on the surface, but quite hectic underneath — the final testing phase for Flink 1.11 is moving at full speed, Stateful Functions 2.1 is out in the wild and Flink has made it into Google Season of Docs 2020.
+To top it off, a piece of good news: Flink Forward is back on October 19-22 as a free virtual event!
+        <a href="/2020/06/10/flink-community-update-june20/">...</a>
+      
+    </p>
+    <a href="/2020/06/10/flink-community-update-june20/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2020/06/09/stateful-functions-2.1.0-release-announcement/">Stateful Functions 2.1.0 Release Announcement</a>
     </h3>
     
@@ -708,32 +734,6 @@
     <a href="/2020/04/07/stateful-functions-2.0-an-event-driven-database-on-apache-flink/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2020/03/30/flink-community-update-april20/">Flink Community Update - April&#39;20</a>
-    </h3>
-    
-
-
-  March 30, 2020 -
-
-
-
-  Marta Paes
-
-  <a href="https://twitter.com/morsapaes">(@morsapaes)</a>
-  
-
-
-
-    <p>While things slow down around us, the Apache Flink community is privileged to remain as active as ever. This blogpost combs through the past few months to give you an update on the state of things in Flink — from core releases to Stateful Functions; from some good old community stats to a new development blog.
-And since now it&rsquo;s more important than ever to keep up the spirits, we’d like to invite you to join the Flink Forward Virtual Conference, on April 22-24 (see Upcoming Events).
-        <a href="/2020/03/30/flink-community-update-april20/">...</a>
-      
-    </p>
-    <a href="/2020/03/30/flink-community-update-april20/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -780,6 +780,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/16/index.html b/content/posts/page/16/index.html
index b0d7238..e8149d9 100644
--- a/content/posts/page/16/index.html
+++ b/content/posts/page/16/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,32 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2020/03/30/flink-community-update-april20/">Flink Community Update - April&#39;20</a>
+    </h3>
+    
+
+
+  March 30, 2020 -
+
+
+
+  Marta Paes
+
+  <a href="https://twitter.com/morsapaes">(@morsapaes)</a>
+  
+
+
+
+    <p>While things slow down around us, the Apache Flink community is privileged to remain as active as ever. This blogpost combs through the past few months to give you an update on the state of things in Flink — from core releases to Stateful Functions; from some good old community stats to a new development blog.
+And since now it&rsquo;s more important than ever to keep up the spirits, we’d like to invite you to join the Flink Forward Virtual Conference, on April 22-24 (see Upcoming Events).
+        <a href="/2020/03/30/flink-community-update-april20/">...</a>
+      
+    </p>
+    <a href="/2020/03/30/flink-community-update-april20/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2020/03/27/flink-as-unified-engine-for-modern-data-warehousing-production-ready-hive-integration/">Flink as Unified Engine for Modern Data Warehousing: Production-Ready Hive Integration</a>
     </h3>
     
@@ -716,33 +742,6 @@
     <a href="/2020/01/15/advanced-flink-application-patterns-vol.1-case-study-of-a-fraud-detection-system/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2019/12/11/apache-flink-1.8.3-released/">Apache Flink 1.8.3 Released</a>
-    </h3>
-    
-
-
-  December 11, 2019 -
-
-
-
-  Hequn Cheng
-
-
-
-
-    <p>The Apache Flink community released the third bugfix version of the Apache Flink 1.8 series.
-This release includes 45 fixes and minor improvements for Flink 1.8.2. The list below includes a detailed list of all fixes and improvements.
-We highly recommend all users to upgrade to Flink 1.8.3.
-Updated Maven dependencies:
-&lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.8.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.8.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.8.3&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
-        <a href="/2019/12/11/apache-flink-1.8.3-released/">...</a>
-      
-    </p>
-    <a href="/2019/12/11/apache-flink-1.8.3-released/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -789,6 +788,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/17/index.html b/content/posts/page/17/index.html
index 7a7faf7..ea687c8 100644
--- a/content/posts/page/17/index.html
+++ b/content/posts/page/17/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,33 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2019/12/11/apache-flink-1.8.3-released/">Apache Flink 1.8.3 Released</a>
+    </h3>
+    
+
+
+  December 11, 2019 -
+
+
+
+  Hequn Cheng
+
+
+
+
+    <p>The Apache Flink community released the third bugfix version of the Apache Flink 1.8 series.
+This release includes 45 fixes and minor improvements for Flink 1.8.2. The list below includes a detailed list of all fixes and improvements.
+We highly recommend all users to upgrade to Flink 1.8.3.
+Updated Maven dependencies:
+&lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.8.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.8.3&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.8.3&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
+        <a href="/2019/12/11/apache-flink-1.8.3-released/">...</a>
+      
+    </p>
+    <a href="/2019/12/11/apache-flink-1.8.3-released/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2019/11/25/how-to-query-pulsar-streams-using-apache-flink/">How to query Pulsar Streams using Apache Flink</a>
     </h3>
     
@@ -718,32 +745,6 @@
     <a href="/2019/07/02/apache-flink-1.8.1-released/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2019/06/26/a-practical-guide-to-broadcast-state-in-apache-flink/">A Practical Guide to Broadcast State in Apache Flink</a>
-    </h3>
-    
-
-
-  June 26, 2019 -
-
-
-
-  Fabian Hueske
-
-  <a href="https://twitter.com/fhueske">(@fhueske)</a>
-  
-
-
-
-    <p>Since version 1.5.0, Apache Flink features a new type of state which is called Broadcast State. In this post, we explain what Broadcast State is, and show an example of how it can be applied to an application that evaluates dynamic patterns on an event stream. We walk you through the processing steps and the source code to implement this application in practice.
-What is Broadcast State? # The Broadcast State can be used to combine and jointly process two streams of events in a specific way.
-        <a href="/2019/06/26/a-practical-guide-to-broadcast-state-in-apache-flink/">...</a>
-      
-    </p>
-    <a href="/2019/06/26/a-practical-guide-to-broadcast-state-in-apache-flink/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -790,6 +791,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/18/index.html b/content/posts/page/18/index.html
index b49f687..6988e39 100644
--- a/content/posts/page/18/index.html
+++ b/content/posts/page/18/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,32 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2019/06/26/a-practical-guide-to-broadcast-state-in-apache-flink/">A Practical Guide to Broadcast State in Apache Flink</a>
+    </h3>
+    
+
+
+  June 26, 2019 -
+
+
+
+  Fabian Hueske
+
+  <a href="https://twitter.com/fhueske">(@fhueske)</a>
+  
+
+
+
+    <p>Since version 1.5.0, Apache Flink features a new type of state which is called Broadcast State. In this post, we explain what Broadcast State is, and show an example of how it can be applied to an application that evaluates dynamic patterns on an event stream. We walk you through the processing steps and the source code to implement this application in practice.
+What is Broadcast State? # The Broadcast State can be used to combine and jointly process two streams of events in a specific way.
+        <a href="/2019/06/26/a-practical-guide-to-broadcast-state-in-apache-flink/">...</a>
+      
+    </p>
+    <a href="/2019/06/26/a-practical-guide-to-broadcast-state-in-apache-flink/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2019/06/05/a-deep-dive-into-flinks-network-stack/">A Deep-Dive into Flink&#39;s Network Stack</a>
     </h3>
     
@@ -700,31 +726,6 @@
     <a href="/2019/02/25/apache-flink-1.6.4-released/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2019/02/21/monitoring-apache-flink-applications-101/">Monitoring Apache Flink Applications 101</a>
-    </h3>
-    
-
-
-  February 21, 2019 -
-
-
-
-  Konstantin Knauf
-
-  <a href="https://twitter.com/snntrable">(@snntrable)</a>
-  
-
-
-
-    <p>This blog post provides an introduction to Apache Flink’s built-in monitoring and metrics system, that allows developers to effectively monitor their Flink jobs. Oftentimes, the task of picking the relevant metrics to monitor a Flink application can be overwhelming for a DevOps team that is just starting with stream processing and Apache Flink. Having worked with many organizations that deploy Flink at scale, I would like to share my experience and some best practice with the community.
-        <a href="/2019/02/21/monitoring-apache-flink-applications-101/">...</a>
-      
-    </p>
-    <a href="/2019/02/21/monitoring-apache-flink-applications-101/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -771,6 +772,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/19/index.html b/content/posts/page/19/index.html
index 60b2197..1a31845 100644
--- a/content/posts/page/19/index.html
+++ b/content/posts/page/19/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,31 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2019/02/21/monitoring-apache-flink-applications-101/">Monitoring Apache Flink Applications 101</a>
+    </h3>
+    
+
+
+  February 21, 2019 -
+
+
+
+  Konstantin Knauf
+
+  <a href="https://twitter.com/snntrable">(@snntrable)</a>
+  
+
+
+
+    <p>This blog post provides an introduction to Apache Flink’s built-in monitoring and metrics system, that allows developers to effectively monitor their Flink jobs. Oftentimes, the task of picking the relevant metrics to monitor a Flink application can be overwhelming for a DevOps team that is just starting with stream processing and Apache Flink. Having worked with many organizations that deploy Flink at scale, I would like to share my experience and some best practice with the community.
+        <a href="/2019/02/21/monitoring-apache-flink-applications-101/">...</a>
+      
+    </p>
+    <a href="/2019/02/21/monitoring-apache-flink-applications-101/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2019/02/15/apache-flink-1.7.2-released/">Apache Flink 1.7.2 Released</a>
     </h3>
     
@@ -703,30 +728,6 @@
     <a href="/2018/09/20/apache-flink-1.5.4-released/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2018/09/20/apache-flink-1.6.1-released/">Apache Flink 1.6.1 Released</a>
-    </h3>
-    
-
-
-  September 20, 2018 -
-
-
-
-
-
-    <p>The Apache Flink community released the first bugfix version of the Apache Flink 1.6 series.
-This release includes 60 fixes and minor improvements for Flink 1.6.1. The list below includes a detailed list of all fixes.
-We highly recommend all users to upgrade to Flink 1.6.1.
-Updated Maven dependencies:
-&lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.6.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.6.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.6.1&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
-        <a href="/2018/09/20/apache-flink-1.6.1-released/">...</a>
-      
-    </p>
-    <a href="/2018/09/20/apache-flink-1.6.1-released/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -773,6 +774,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/2/index.html b/content/posts/page/2/index.html
index 9f2974f..dcd7db7 100644
--- a/content/posts/page/2/index.html
+++ b/content/posts/page/2/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,31 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2025/02/12/apache-flink-1.20.1-release-announcement/">Apache Flink 1.20.1 Release Announcement</a>
+    </h3>
+    
+
+
+  February 12, 2025 -
+
+
+
+  Alexander Fedulov
+
+
+
+
+    <p>The Apache Flink Community is pleased to announce the first bug fix release of the Flink 1.20 series.
+This release includes 75 bug fixes, vulnerability fixes, and minor improvements for Flink 1.20. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
+We highly recommend all users upgrade to Flink 1.20.1.
+        <a href="/2025/02/12/apache-flink-1.20.1-release-announcement/">...</a>
+      
+    </p>
+    <a href="/2025/02/12/apache-flink-1.20.1-release-announcement/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2025/01/21/apache-flink-cdc-3.3.0-release-announcement/">Apache Flink CDC 3.3.0 Release Announcement</a>
     </h3>
     
@@ -716,33 +741,6 @@
     <a href="/2024/07/02/apache-flink-kubernetes-operator-1.9.0-release-announcement/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2024/06/18/apache-flink-cdc-3.1.1-release-announcement/">Apache Flink CDC 3.1.1 Release Announcement</a>
-    </h3>
-    
-
-
-  June 18, 2024 -
-
-
-
-  Qingsheng Ren
-
-  <a href="https://twitter.com/renqstuite">(@renqstuite)</a>
-  
-
-
-
-    <p>The Apache Flink Community is pleased to announce the first bug fix release of the Flink CDC 3.1 series.
-The release contains fixes for several critical issues and improves compatibilities with Apache Flink. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
-We highly recommend all users to upgrade to Flink CDC 3.
-        <a href="/2024/06/18/apache-flink-cdc-3.1.1-release-announcement/">...</a>
-      
-    </p>
-    <a href="/2024/06/18/apache-flink-cdc-3.1.1-release-announcement/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -789,6 +787,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/20/index.html b/content/posts/page/20/index.html
index 6f476f8..af28fe5 100644
--- a/content/posts/page/20/index.html
+++ b/content/posts/page/20/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,30 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2018/09/20/apache-flink-1.6.1-released/">Apache Flink 1.6.1 Released</a>
+    </h3>
+    
+
+
+  September 20, 2018 -
+
+
+
+
+
+    <p>The Apache Flink community released the first bugfix version of the Apache Flink 1.6 series.
+This release includes 60 fixes and minor improvements for Flink 1.6.1. The list below includes a detailed list of all fixes.
+We highly recommend all users to upgrade to Flink 1.6.1.
+Updated Maven dependencies:
+&lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-java&lt;/artifactId&gt; &lt;version&gt;1.6.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-streaming-java_2.11&lt;/artifactId&gt; &lt;version&gt;1.6.1&lt;/version&gt; &lt;/dependency&gt; &lt;dependency&gt; &lt;groupId&gt;org.apache.flink&lt;/groupId&gt; &lt;artifactId&gt;flink-clients_2.11&lt;/artifactId&gt; &lt;version&gt;1.6.1&lt;/version&gt; &lt;/dependency&gt; You can find the binaries on the updated Downloads page.
+        <a href="/2018/09/20/apache-flink-1.6.1-released/">...</a>
+      
+    </p>
+    <a href="/2018/09/20/apache-flink-1.6.1-released/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2018/08/21/apache-flink-1.5.3-released/">Apache Flink 1.5.3 Released</a>
     </h3>
     
@@ -702,37 +726,6 @@
     <a href="/2018/02/15/apache-flink-1.4.1-released/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2018/01/30/managing-large-state-in-apache-flink-an-intro-to-incremental-checkpointing/">Managing Large State in Apache Flink: An Intro to Incremental Checkpointing</a>
-    </h3>
-    
-
-
-  January 30, 2018 -
-
-
-
-  Stefan Ricther
-
-  <a href="https://twitter.com/StefanRRicther">(@StefanRRicther)</a>
-  
-
-  Chris Ward
-
-  <a href="https://twitter.com/chrischinch">(@chrischinch)</a>
-  
-
-
-
-    <p>Apache Flink was purpose-built for stateful stream processing. However, what is state in a stream processing application? I defined state and stateful stream processing in a previous blog post, and in case you need a refresher, state is defined as memory in an application&rsquo;s operators that stores information about previously-seen events that you can use to influence the processing of future events.
-State is a fundamental, enabling concept in stream processing required for a majority of complex use cases.
-        <a href="/2018/01/30/managing-large-state-in-apache-flink-an-intro-to-incremental-checkpointing/">...</a>
-      
-    </p>
-    <a href="/2018/01/30/managing-large-state-in-apache-flink-an-intro-to-incremental-checkpointing/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -779,6 +772,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/21/index.html b/content/posts/page/21/index.html
index 0f93547..7a42fba 100644
--- a/content/posts/page/21/index.html
+++ b/content/posts/page/21/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,37 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2018/01/30/managing-large-state-in-apache-flink-an-intro-to-incremental-checkpointing/">Managing Large State in Apache Flink: An Intro to Incremental Checkpointing</a>
+    </h3>
+    
+
+
+  January 30, 2018 -
+
+
+
+  Stefan Richter
+
+  <a href="https://twitter.com/StefanRRichter">(@StefanRRichter)</a>
+  
+
+  Chris Ward
+
+  <a href="https://twitter.com/chrischinch">(@chrischinch)</a>
+  
+
+
+
+    <p>Apache Flink was purpose-built for stateful stream processing. However, what is state in a stream processing application? I defined state and stateful stream processing in a previous blog post, and in case you need a refresher, state is defined as memory in an application&rsquo;s operators that stores information about previously-seen events that you can use to influence the processing of future events.
+State is a fundamental, enabling concept in stream processing required for a majority of complex use cases.
+        <a href="/2018/01/30/managing-large-state-in-apache-flink-an-intro-to-incremental-checkpointing/">...</a>
+      
+    </p>
+    <a href="/2018/01/30/managing-large-state-in-apache-flink-an-intro-to-incremental-checkpointing/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2017/12/21/apache-flink-in-2017-year-in-review/">Apache Flink in 2017: Year in Review</a>
     </h3>
     
@@ -705,27 +736,6 @@
     <a href="/2017/04/26/apache-flink-1.2.1-released/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2017/03/30/continuous-queries-on-dynamic-tables/">Continuous Queries on Dynamic Tables</a>
-    </h3>
-    
-
-
-  March 30, 2017 -
-
-
-
-
-
-    <p>Analyzing Data Streams with SQL # More and more companies are adopting stream processing and are migrating existing batch applications to streaming or implementing streaming solutions for new use cases. Many of those applications focus on analyzing streaming data. The data streams that are analyzed come from a wide variety of sources such as database transactions, clicks, sensor measurements, or IoT devices.
-Apache Flink is very well suited to power streaming analytics applications because it provides support for event-time semantics, stateful exactly-once processing, and achieves high throughput and low latency at the same time.
-        <a href="/2017/03/30/continuous-queries-on-dynamic-tables/">...</a>
-      
-    </p>
-    <a href="/2017/03/30/continuous-queries-on-dynamic-tables/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -772,6 +782,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/22/index.html b/content/posts/page/22/index.html
index 1d004ac..836473a 100644
--- a/content/posts/page/22/index.html
+++ b/content/posts/page/22/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,27 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2017/03/30/continuous-queries-on-dynamic-tables/">Continuous Queries on Dynamic Tables</a>
+    </h3>
+    
+
+
+  March 30, 2017 -
+
+
+
+
+
+    <p>Analyzing Data Streams with SQL # More and more companies are adopting stream processing and are migrating existing batch applications to streaming or implementing streaming solutions for new use cases. Many of those applications focus on analyzing streaming data. The data streams that are analyzed come from a wide variety of sources such as database transactions, clicks, sensor measurements, or IoT devices.
+Apache Flink is very well suited to power streaming analytics applications because it provides support for event-time semantics, stateful exactly-once processing, and achieves high throughput and low latency at the same time.
+        <a href="/2017/03/30/continuous-queries-on-dynamic-tables/">...</a>
+      
+    </p>
+    <a href="/2017/03/30/continuous-queries-on-dynamic-tables/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2017/03/29/from-streams-to-tables-and-back-again-an-update-on-flinks-table-sql-api/">From Streams to Tables and Back Again: An Update on Flink&#39;s Table &amp; SQL API</a>
     </h3>
     
@@ -670,28 +691,6 @@
     <a href="/2016/08/04/announcing-apache-flink-1.1.0/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2016/08/04/flink-1.1.1-released/">Flink 1.1.1 Released</a>
-    </h3>
-    
-
-
-  August 4, 2016 -
-
-
-
-
-
-    <p>Today, the Flink community released Flink version 1.1.1.
-The Maven artifacts published on Maven central for 1.1.0 had a Hadoop dependency issue: No Hadoop 1 specific version (with version 1.1.0-hadoop1) was deployed and 1.1.0 artifacts have a dependency on Hadoop 1 instead of Hadoop 2.
-This was fixed with this release and we highly recommend all users to use this version of Flink by bumping your Flink dependencies to version 1.
-        <a href="/2016/08/04/flink-1.1.1-released/">...</a>
-      
-    </p>
-    <a href="/2016/08/04/flink-1.1.1-released/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -738,6 +737,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/23/index.html b/content/posts/page/23/index.html
index 0f158d6..ca382de 100644
--- a/content/posts/page/23/index.html
+++ b/content/posts/page/23/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,28 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2016/08/04/flink-1.1.1-released/">Flink 1.1.1 Released</a>
+    </h3>
+    
+
+
+  August 4, 2016 -
+
+
+
+
+
+    <p>Today, the Flink community released Flink version 1.1.1.
+The Maven artifacts published on Maven central for 1.1.0 had a Hadoop dependency issue: No Hadoop 1 specific version (with version 1.1.0-hadoop1) was deployed and 1.1.0 artifacts have a dependency on Hadoop 1 instead of Hadoop 2.
+This was fixed with this release and we highly recommend all users to use this version of Flink by bumping your Flink dependencies to version 1.
+        <a href="/2016/08/04/flink-1.1.1-released/">...</a>
+      
+    </p>
+    <a href="/2016/08/04/flink-1.1.1-released/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2016/05/24/stream-processing-for-everyone-with-sql-and-apache-flink/">Stream Processing for Everyone with SQL and Apache Flink</a>
     </h3>
     
@@ -664,26 +686,6 @@
     <a href="/2015/12/18/flink-2015-a-year-in-review-and-a-lookout-to-2016/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2015/12/11/storm-compatibility-in-apache-flink-how-to-run-existing-storm-topologies-on-flink/">Storm Compatibility in Apache Flink: How to run existing Storm topologies on Flink</a>
-    </h3>
-    
-
-
-  December 11, 2015 -
-
-
-
-
-
-    <p>Apache Storm was one of the first distributed and scalable stream processing systems available in the open source space offering (near) real-time tuple-by-tuple processing semantics. Initially released by the developers at Backtype in 2011 under the Eclipse open-source license, it became popular very quickly. Only shortly afterwards, Twitter acquired Backtype. Since then, Storm has been growing in popularity, is used in production at many big companies, and is the de-facto industry standard for big data stream processing.
-        <a href="/2015/12/11/storm-compatibility-in-apache-flink-how-to-run-existing-storm-topologies-on-flink/">...</a>
-      
-    </p>
-    <a href="/2015/12/11/storm-compatibility-in-apache-flink-how-to-run-existing-storm-topologies-on-flink/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -730,6 +732,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/24/index.html b/content/posts/page/24/index.html
index 4e6e66d..8c483ab 100644
--- a/content/posts/page/24/index.html
+++ b/content/posts/page/24/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,26 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2015/12/11/storm-compatibility-in-apache-flink-how-to-run-existing-storm-topologies-on-flink/">Storm Compatibility in Apache Flink: How to run existing Storm topologies on Flink</a>
+    </h3>
+    
+
+
+  December 11, 2015 -
+
+
+
+
+
+    <p>Apache Storm was one of the first distributed and scalable stream processing systems available in the open source space offering (near) real-time tuple-by-tuple processing semantics. Initially released by the developers at Backtype in 2011 under the Eclipse open-source license, it became popular very quickly. Only shortly afterwards, Twitter acquired Backtype. Since then, Storm has been growing in popularity, is used in production at many big companies, and is the de-facto industry standard for big data stream processing.
+        <a href="/2015/12/11/storm-compatibility-in-apache-flink-how-to-run-existing-storm-topologies-on-flink/">...</a>
+      
+    </p>
+    <a href="/2015/12/11/storm-compatibility-in-apache-flink-how-to-run-existing-storm-topologies-on-flink/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2015/12/04/introducing-stream-windows-in-apache-flink/">Introducing Stream Windows in Apache Flink</a>
     </h3>
     
@@ -664,26 +684,6 @@
     <a href="/2015/05/14/april-2015-in-the-flink-community/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2015/05/11/juggling-with-bits-and-bytes/">Juggling with Bits and Bytes</a>
-    </h3>
-    
-
-
-  May 11, 2015 -
-
-
-
-
-
-    <p>How Apache Flink operates on binary data # Nowadays, a lot of open-source systems for analyzing large data sets are implemented in Java or other JVM-based programming languages. The most well-known example is Apache Hadoop, but also newer frameworks such as Apache Spark, Apache Drill, and also Apache Flink run on JVMs. A common challenge that JVM-based data analysis engines face is to store large amounts of data in memory - both for caching and for efficient processing such as sorting and joining of data.
-        <a href="/2015/05/11/juggling-with-bits-and-bytes/">...</a>
-      
-    </p>
-    <a href="/2015/05/11/juggling-with-bits-and-bytes/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -730,6 +730,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/25/index.html b/content/posts/page/25/index.html
index 7b83c4b..25d5d4d 100644
--- a/content/posts/page/25/index.html
+++ b/content/posts/page/25/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,26 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2015/05/11/juggling-with-bits-and-bytes/">Juggling with Bits and Bytes</a>
+    </h3>
+    
+
+
+  May 11, 2015 -
+
+
+
+
+
+    <p>How Apache Flink operates on binary data # Nowadays, a lot of open-source systems for analyzing large data sets are implemented in Java or other JVM-based programming languages. The most well-known example is Apache Hadoop, but also newer frameworks such as Apache Spark, Apache Drill, and also Apache Flink run on JVMs. A common challenge that JVM-based data analysis engines face is to store large amounts of data in memory - both for caching and for efficient processing such as sorting and joining of data.
+        <a href="/2015/05/11/juggling-with-bits-and-bytes/">...</a>
+      
+    </p>
+    <a href="/2015/05/11/juggling-with-bits-and-bytes/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2015/04/13/announcing-flink-0.9.0-milestone1-preview-release/">Announcing Flink 0.9.0-milestone1 preview release</a>
     </h3>
     
@@ -666,29 +686,6 @@
     <a href="/2014/11/18/hadoop-compatibility-in-flink/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2014/11/04/apache-flink-0.7.0-available/">Apache Flink 0.7.0 available</a>
-    </h3>
-    
-
-
-  November 4, 2014 -
-
-
-
-
-
-    <p>We are pleased to announce the availability of Flink 0.7.0. This release includes new user-facing features as well as performance and bug fixes, brings the Scala and Java APIs in sync, and introduces Flink Streaming. A total of 34 people have contributed to this release, a big thanks to all of them!
-Download Flink 0.7.0 here
-See the release changelog here
-Overview of major new features # Flink Streaming: The gem of the 0.
-        <a href="/2014/11/04/apache-flink-0.7.0-available/">...</a>
-      
-    </p>
-    <a href="/2014/11/04/apache-flink-0.7.0-available/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -735,6 +732,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/26/index.html b/content/posts/page/26/index.html
index 97b0b15..8d5888a 100644
--- a/content/posts/page/26/index.html
+++ b/content/posts/page/26/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,29 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2014/11/04/apache-flink-0.7.0-available/">Apache Flink 0.7.0 available</a>
+    </h3>
+    
+
+
+  November 4, 2014 -
+
+
+
+
+
+    <p>We are pleased to announce the availability of Flink 0.7.0. This release includes new user-facing features as well as performance and bug fixes, brings the Scala and Java APIs in sync, and introduces Flink Streaming. A total of 34 people have contributed to this release, a big thanks to all of them!
+Download Flink 0.7.0 here
+See the release changelog here
+Overview of major new features # Flink Streaming: The gem of the 0.
+        <a href="/2014/11/04/apache-flink-0.7.0-available/">...</a>
+      
+    </p>
+    <a href="/2014/11/04/apache-flink-0.7.0-available/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2014/10/03/upcoming-events/">Upcoming Events</a>
     </h3>
     
@@ -580,6 +603,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/3/index.html b/content/posts/page/3/index.html
index a0e1bf0..967e95f 100644
--- a/content/posts/page/3/index.html
+++ b/content/posts/page/3/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,33 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2024/06/18/apache-flink-cdc-3.1.1-release-announcement/">Apache Flink CDC 3.1.1 Release Announcement</a>
+    </h3>
+    
+
+
+  June 18, 2024 -
+
+
+
+  Qingsheng Ren
+
+  <a href="https://twitter.com/renqstuite">(@renqstuite)</a>
+  
+
+
+
+    <p>The Apache Flink Community is pleased to announce the first bug fix release of the Flink CDC 3.1 series.
+The release contains fixes for several critical issues and improves compatibilities with Apache Flink. Below you will find a list of all bugfixes and improvements (excluding improvements to the build infrastructure and build stability). For a complete list of all changes see: JIRA.
+We highly recommend all users to upgrade to Flink CDC 3.
+        <a href="/2024/06/18/apache-flink-cdc-3.1.1-release-announcement/">...</a>
+      
+    </p>
+    <a href="/2024/06/18/apache-flink-cdc-3.1.1-release-announcement/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2024/06/14/apache-flink-1.19.1-release-announcement/">Apache Flink 1.19.1 Release Announcement</a>
     </h3>
     
@@ -731,48 +758,6 @@
     <a href="/2023/10/27/apache-flink-kubernetes-operator-1.6.1-release-announcement/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2023/10/24/announcing-the-release-of-apache-flink-1.18/">Announcing the Release of Apache Flink 1.18</a>
-    </h3>
-    
-
-
-  October 24, 2023 -
-
-
-
-  Jing Ge
-
-  <a href="https://twitter.com/jingengineer">(@jingengineer)</a>
-  
-
-  Konstantin Knauf
-
-  <a href="https://twitter.com/snntrable">(@snntrable)</a>
-  
-
-  Sergey Nuyanzin
-
-  <a href="https://twitter.com/uckamello">(@uckamello)</a>
-  
-
-  Qingsheng Ren
-
-  <a href="https://twitter.com/renqstuite">(@renqstuite)</a>
-  
-
-
-
-    <p>The Apache Flink PMC is pleased to announce the release of Apache Flink 1.18.0. As usual, we are looking at a packed release with a wide variety of improvements and new features. Overall, 174 people contributed to this release completing 18 FLIPS and 700+ issues. Thank you!
-Let&rsquo;s dive into the highlights.
-Towards a Streaming Lakehouse # Flink SQL Improvements # Introduce Flink JDBC Driver For SQL Gateway # Flink 1.
-        <a href="/2023/10/24/announcing-the-release-of-apache-flink-1.18/">...</a>
-      
-    </p>
-    <a href="/2023/10/24/announcing-the-release-of-apache-flink-1.18/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -819,6 +804,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/4/index.html b/content/posts/page/4/index.html
index c8b3c98..54e6590 100644
--- a/content/posts/page/4/index.html
+++ b/content/posts/page/4/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,48 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2023/10/24/announcing-the-release-of-apache-flink-1.18/">Announcing the Release of Apache Flink 1.18</a>
+    </h3>
+    
+
+
+  October 24, 2023 -
+
+
+
+  Jing Ge
+
+  <a href="https://twitter.com/jingengineer">(@jingengineer)</a>
+  
+
+  Konstantin Knauf
+
+  <a href="https://twitter.com/snntrable">(@snntrable)</a>
+  
+
+  Sergey Nuyanzin
+
+  <a href="https://twitter.com/uckamello">(@uckamello)</a>
+  
+
+  Qingsheng Ren
+
+  <a href="https://twitter.com/renqstuite">(@renqstuite)</a>
+  
+
+
+
+    <p>The Apache Flink PMC is pleased to announce the release of Apache Flink 1.18.0. As usual, we are looking at a packed release with a wide variety of improvements and new features. Overall, 174 people contributed to this release completing 18 FLIPS and 700+ issues. Thank you!
+Let&rsquo;s dive into the highlights.
+Towards a Streaming Lakehouse # Flink SQL Improvements # Introduce Flink JDBC Driver For SQL Gateway # Flink 1.
+        <a href="/2023/10/24/announcing-the-release-of-apache-flink-1.18/">...</a>
+      
+    </p>
+    <a href="/2023/10/24/announcing-the-release-of-apache-flink-1.18/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2023/09/19/stateful-functions-3.3.0-release-announcement/">Stateful Functions 3.3.0 Release Announcement</a>
     </h3>
     
@@ -710,32 +752,6 @@
     <a href="/2023/05/09/howto-migrate-a-real-life-batch-pipeline-from-the-dataset-api-to-the-datastream-api/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2023/05/03/howto-create-a-batch-source-with-the-new-source-framework/">Howto create a batch source with the new Source framework</a>
-    </h3>
-    
-
-
-  May 3, 2023 -
-
-
-
-  Etienne Chauchot
-
-  <a href="https://twitter.com/echauchot">(@echauchot)</a>
-  
-
-
-
-    <p>Introduction # The Flink community has designed a new Source framework based on FLIP-27 lately. Some connectors have migrated to this new framework. This article is a how-to for creating a batch source using this new framework. It was built while implementing the Flink batch source for Cassandra. If you are interested in contributing or migrating connectors, this blog post is for you!
-Implementing the source components # The source architecture is depicted in the diagrams below:
-        <a href="/2023/05/03/howto-create-a-batch-source-with-the-new-source-framework/">...</a>
-      
-    </p>
-    <a href="/2023/05/03/howto-create-a-batch-source-with-the-new-source-framework/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -782,6 +798,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/5/index.html b/content/posts/page/5/index.html
index 5a66246..bf2e47a 100644
--- a/content/posts/page/5/index.html
+++ b/content/posts/page/5/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,32 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2023/05/03/howto-create-a-batch-source-with-the-new-source-framework/">Howto create a batch source with the new Source framework</a>
+    </h3>
+    
+
+
+  May 3, 2023 -
+
+
+
+  Etienne Chauchot
+
+  <a href="https://twitter.com/echauchot">(@echauchot)</a>
+  
+
+
+
+    <p>Introduction # The Flink community has designed a new Source framework based on FLIP-27 lately. Some connectors have migrated to this new framework. This article is a how-to for creating a batch source using this new framework. It was built while implementing the Flink batch source for Cassandra. If you are interested in contributing or migrating connectors, this blog post is for you!
+Implementing the source components # The source architecture is depicted in the diagrams below:
+        <a href="/2023/05/03/howto-create-a-batch-source-with-the-new-source-framework/">...</a>
+      
+    </p>
+    <a href="/2023/05/03/howto-create-a-batch-source-with-the-new-source-framework/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2023/04/19/apache-flink-ml-2.2.0-release-announcement/">Apache Flink ML 2.2.0 Release Announcement</a>
     </h3>
     
@@ -723,29 +749,6 @@
     <a href="/2022/12/14/apache-flink-kubernetes-operator-1.3.0-release-announcement/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2022/11/25/optimising-the-throughput-of-async-sinks-using-a-custom-ratelimitingstrategy/">Optimising the throughput of async sinks using a custom RateLimitingStrategy</a>
-    </h3>
-    
-
-
-  November 25, 2022 -
-
-
-
-  Hong Liang Teoh
-
-
-
-
-    <p>Introduction # When designing a Flink data processing job, one of the key concerns is maximising job throughput. Sink throughput is a crucial factor because it can determine the entire job’s throughput. We generally want the highest possible write rate in the sink without overloading the destination. However, since the factors impacting a destination’s performance are variable over the job’s lifetime, the sink needs to adjust its write rate dynamically.
-        <a href="/2022/11/25/optimising-the-throughput-of-async-sinks-using-a-custom-ratelimitingstrategy/">...</a>
-      
-    </p>
-    <a href="/2022/11/25/optimising-the-throughput-of-async-sinks-using-a-custom-ratelimitingstrategy/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -792,6 +795,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/6/index.html b/content/posts/page/6/index.html
index 67e631a..e72bcee 100644
--- a/content/posts/page/6/index.html
+++ b/content/posts/page/6/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,29 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2022/11/25/optimising-the-throughput-of-async-sinks-using-a-custom-ratelimitingstrategy/">Optimising the throughput of async sinks using a custom RateLimitingStrategy</a>
+    </h3>
+    
+
+
+  November 25, 2022 -
+
+
+
+  Hong Liang Teoh
+
+
+
+
+    <p>Introduction # When designing a Flink data processing job, one of the key concerns is maximising job throughput. Sink throughput is a crucial factor because it can determine the entire job’s throughput. We generally want the highest possible write rate in the sink without overloading the destination. However, since the factors impacting a destination’s performance are variable over the job’s lifetime, the sink needs to adjust its write rate dynamically.
+        <a href="/2022/11/25/optimising-the-throughput-of-async-sinks-using-a-custom-ratelimitingstrategy/">...</a>
+      
+    </p>
+    <a href="/2022/11/25/optimising-the-throughput-of-async-sinks-using-a-custom-ratelimitingstrategy/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2022/11/10/apache-flink-1.15.3-release-announcement/">Apache Flink 1.15.3 Release Announcement</a>
     </h3>
     
@@ -708,32 +731,6 @@
     <a href="/2022/07/25/apache-flink-kubernetes-operator-1.1.0-release-announcement/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2022/07/12/apache-flink-ml-2.1.0-release-announcement/">Apache Flink ML 2.1.0 Release Announcement</a>
-    </h3>
-    
-
-
-  July 12, 2022 -
-
-
-
-  Zhipeng Zhang
-
-
-  Dong Lin
-
-
-
-
-    <p>The Apache Flink community is excited to announce the release of Flink ML 2.1.0! This release focuses on improving Flink ML&rsquo;s infrastructure, such as Python SDK, memory management, and benchmark framework, to facilitate the development of performant, memory-safe, and easy-to-use algorithm libraries. We validated the enhanced infrastructure by implementing, benchmarking, and optimizing 10 new algorithms in Flink ML, and confirmed that Flink ML can meet or exceed the performance of selected algorithms from alternative popular ML libraries.
-        <a href="/2022/07/12/apache-flink-ml-2.1.0-release-announcement/">...</a>
-      
-    </p>
-    <a href="/2022/07/12/apache-flink-ml-2.1.0-release-announcement/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -780,6 +777,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/7/index.html b/content/posts/page/7/index.html
index 2a2e816..db97555 100644
--- a/content/posts/page/7/index.html
+++ b/content/posts/page/7/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,32 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2022/07/12/apache-flink-ml-2.1.0-release-announcement/">Apache Flink ML 2.1.0 Release Announcement</a>
+    </h3>
+    
+
+
+  July 12, 2022 -
+
+
+
+  Zhipeng Zhang
+
+
+  Dong Lin
+
+
+
+
+    <p>The Apache Flink community is excited to announce the release of Flink ML 2.1.0! This release focuses on improving Flink ML&rsquo;s infrastructure, such as Python SDK, memory management, and benchmark framework, to facilitate the development of performant, memory-safe, and easy-to-use algorithm libraries. We validated the enhanced infrastructure by implementing, benchmarking, and optimizing 10 new algorithms in Flink ML, and confirmed that Flink ML can meet or exceed the performance of selected algorithms from alternative popular ML libraries.
+        <a href="/2022/07/12/apache-flink-ml-2.1.0-release-announcement/">...</a>
+      
+    </p>
+    <a href="/2022/07/12/apache-flink-ml-2.1.0-release-announcement/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2022/07/11/flip-147-support-checkpoints-after-tasks-finished-part-one/">FLIP-147: Support Checkpoints After Tasks Finished - Part One</a>
     </h3>
     
@@ -721,35 +747,6 @@
     <a href="/2022/05/18/getting-into-low-latency-gears-with-apache-flink-part-one/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2022/05/11/apache-flink-table-store-0.1.0-release-announcement/">Apache Flink Table Store 0.1.0 Release Announcement</a>
-    </h3>
-    
-
-
-  May 11, 2022 -
-
-
-
-  Jingsong Lee
-
-
-  Jiangjie (Becket) Qin
-
-
-
-
-    <p>The Apache Flink community is pleased to announce the preview release of the Apache Flink Table Store (0.1.0).
-Please check out the full documentation for detailed information and user guides.
-Note: Flink Table Store is still in beta status and undergoing rapid development. We do not recommend that you use it directly in a production environment.
-What is Flink Table Store # In the past years, thanks to our numerous contributors and users, Apache Flink has established itself as one of the best distributed computing engines, especially for stateful stream processing at large scale.
-        <a href="/2022/05/11/apache-flink-table-store-0.1.0-release-announcement/">...</a>
-      
-    </p>
-    <a href="/2022/05/11/apache-flink-table-store-0.1.0-release-announcement/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -796,6 +793,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/8/index.html b/content/posts/page/8/index.html
index 9f2dfaf..cfaf1bf 100644
--- a/content/posts/page/8/index.html
+++ b/content/posts/page/8/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,35 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2022/05/11/apache-flink-table-store-0.1.0-release-announcement/">Apache Flink Table Store 0.1.0 Release Announcement</a>
+    </h3>
+    
+
+
+  May 11, 2022 -
+
+
+
+  Jingsong Lee
+
+
+  Jiangjie (Becket) Qin
+
+
+
+
+    <p>The Apache Flink community is pleased to announce the preview release of the Apache Flink Table Store (0.1.0).
+Please check out the full documentation for detailed information and user guides.
+Note: Flink Table Store is still in beta status and undergoing rapid development. We do not recommend that you use it directly in a production environment.
+What is Flink Table Store # In the past years, thanks to our numerous contributors and users, Apache Flink has established itself as one of the best distributed computing engines, especially for stateful stream processing at large scale.
+        <a href="/2022/05/11/apache-flink-table-store-0.1.0-release-announcement/">...</a>
+      
+    </p>
+    <a href="/2022/05/11/apache-flink-table-store-0.1.0-release-announcement/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2022/05/06/exploring-the-thread-mode-in-pyflink/">Exploring the thread mode in PyFlink</a>
     </h3>
     
@@ -724,31 +753,6 @@
     <a href="/2022/01/31/stateful-functions-3.2.0-release-announcement/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2022/01/20/pravega-flink-connector-101/">Pravega Flink Connector 101</a>
-    </h3>
-    
-
-
-  January 20, 2022 -
-
-
-
-  Yumin Zhou (Brian)
-
-  <a href="https://twitter.com/crazy__zhou">(@crazy__zhou)</a>
-  
-
-
-
-    <p>Pravega, which is now a CNCF sandbox project, is a cloud-native storage system based on abstractions for both batch and streaming data consumption. Pravega streams (a new storage abstraction) are durable, consistent, and elastic, while natively supporting long-term data retention. In comparison, Apache Flink is a popular real-time computing engine that provides unified batch and stream processing. Flink provides high-throughput, low-latency computation, as well as support for complex event processing and state management.
-        <a href="/2022/01/20/pravega-flink-connector-101/">...</a>
-      
-    </p>
-    <a href="/2022/01/20/pravega-flink-connector-101/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -795,6 +799,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/posts/page/9/index.html b/content/posts/page/9/index.html
index 1e13186..c0946d4 100644
--- a/content/posts/page/9/index.html
+++ b/content/posts/page/9/index.html
@@ -24,7 +24,7 @@
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/posts/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -279,7 +279,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,6 +475,31 @@
   
   <article class="markdown book-post">
     <h3>
+      <a href="/2022/01/20/pravega-flink-connector-101/">Pravega Flink Connector 101</a>
+    </h3>
+    
+
+
+  January 20, 2022 -
+
+
+
+  Yumin Zhou (Brian)
+
+  <a href="https://twitter.com/crazy__zhou">(@crazy__zhou)</a>
+  
+
+
+
+    <p>Pravega, which is now a CNCF sandbox project, is a cloud-native storage system based on abstractions for both batch and streaming data consumption. Pravega streams (a new storage abstraction) are durable, consistent, and elastic, while natively supporting long-term data retention. In comparison, Apache Flink is a popular real-time computing engine that provides unified batch and stream processing. Flink provides high-throughput, low-latency computation, as well as support for complex event processing and state management.
+        <a href="/2022/01/20/pravega-flink-connector-101/">...</a>
+      
+    </p>
+    <a href="/2022/01/20/pravega-flink-connector-101/">Continue reading »</a>
+  </article>
+  
+  <article class="markdown book-post">
+    <h3>
       <a href="/2022/01/17/apache-flink-1.14.3-release-announcement/">Apache Flink 1.14.3 Release Announcement</a>
     </h3>
     
@@ -730,33 +755,6 @@
     <a href="/2021/10/26/sort-based-blocking-shuffle-implementation-in-flink-part-one/">Continue reading »</a>
   </article>
   
-  <article class="markdown book-post">
-    <h3>
-      <a href="/2021/10/26/sort-based-blocking-shuffle-implementation-in-flink-part-two/">Sort-Based Blocking Shuffle Implementation in Flink - Part Two</a>
-    </h3>
-    
-
-
-  October 26, 2021 -
-
-
-
-  Yingjie Cao (Kevin)
-
-
-  Daisy Tsang
-
-
-
-
-    <p>Part one of this blog post explained the motivation behind introducing sort-based blocking shuffle, presented benchmark results, and provided guidelines on how to use this new feature.
-Like sort-merge shuffle implemented by other distributed data processing frameworks, the whole sort-based shuffle process in Flink consists of several important stages, including collecting data in memory, sorting the collected data in memory, spilling the sorted data to files, and reading the shuffle data from these spilled files.
-        <a href="/2021/10/26/sort-based-blocking-shuffle-implementation-in-flink-part-two/">...</a>
-      
-    </p>
-    <a href="/2021/10/26/sort-based-blocking-shuffle-implementation-in-flink-part-two/">Continue reading »</a>
-  </article>
-  
 
   
     <ul class="pagination pagination-default">
@@ -803,6 +801,10 @@
   <ul>
     
     <li>
+      <a href="https://flink.apache.org/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Apache Flink CDC 3.5.0 Release Announcement</a>
+    </li>
+    
+    <li>
       <a href="https://flink.apache.org/2025/07/31/apache-flink-2.1.0-ushers-in-a-new-era-of-unified-real-time-data--ai-with-comprehensive-upgrades/">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades</a>
     </li>
     
diff --git a/content/sitemap.xml b/content/sitemap.xml
index 71db5fc..dc3e661 100644
--- a/content/sitemap.xml
+++ b/content/sitemap.xml
@@ -4,7 +4,7 @@
   <sitemap>
     <loc>https://flink.apache.org/en/sitemap.xml</loc>
     
-      <lastmod>2025-07-31T00:00:00+00:00</lastmod>
+      <lastmod>2025-09-26T08:00:00+00:00</lastmod>
     
   </sitemap>
   
diff --git a/content/tags/index.html b/content/tags/index.html
index 9894b4b..9cc1ca3 100644
--- a/content/tags/index.html
+++ b/content/tags/index.html
@@ -26,7 +26,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/tags/" title="Tags">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/tags/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -281,7 +281,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/what-is-flink-ml/index.html b/content/what-is-flink-ml/index.html
index 59c0950..2bef6fd 100644
--- a/content/what-is-flink-ml/index.html
+++ b/content/what-is-flink-ml/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/what-is-flink-ml/" title="What is Flink ML?">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/what-is-flink-table-store/index.html b/content/what-is-flink-table-store/index.html
index 80c2efc..fb70b74 100644
--- a/content/what-is-flink-table-store/index.html
+++ b/content/what-is-flink-table-store/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/what-is-flink-table-store/" title="What is Paimon(incubating) (formerly Flink Table Store)?">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/what-is-flink/community/index.html b/content/what-is-flink/community/index.html
index 286b33d..505edcc 100644
--- a/content/what-is-flink/community/index.html
+++ b/content/what-is-flink/community/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/what-is-flink/community/" title="社区 & 项目信息">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/what-is-flink/flink-applications/index.html b/content/what-is-flink/flink-applications/index.html
index 217478f..bd70b30 100644
--- a/content/what-is-flink/flink-applications/index.html
+++ b/content/what-is-flink/flink-applications/index.html
@@ -32,7 +32,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/what-is-flink/flink-applications/" title="应用">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/what-is-flink/flink-architecture/index.html b/content/what-is-flink/flink-architecture/index.html
index 17984a6..fdb5991 100644
--- a/content/what-is-flink/flink-architecture/index.html
+++ b/content/what-is-flink/flink-architecture/index.html
@@ -32,7 +32,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/what-is-flink/flink-architecture/" title="架构">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/what-is-flink/flink-operations/index.html b/content/what-is-flink/flink-operations/index.html
index baf3bc6..6e40544 100644
--- a/content/what-is-flink/flink-operations/index.html
+++ b/content/what-is-flink/flink-operations/index.html
@@ -30,7 +30,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/what-is-flink/flink-operations/" title="运维">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/what-is-flink/index.html b/content/what-is-flink/index.html
index 9863c1f..b2cfeff 100644
--- a/content/what-is-flink/index.html
+++ b/content/what-is-flink/index.html
@@ -26,7 +26,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/what-is-flink/" title="About">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/what-is-flink/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -281,7 +281,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/what-is-flink/powered-by/index.html b/content/what-is-flink/powered-by/index.html
index d63aace..ee631a4 100644
--- a/content/what-is-flink/powered-by/index.html
+++ b/content/what-is-flink/powered-by/index.html
@@ -30,7 +30,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/what-is-flink/powered-by/" title="Flink 用户">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/what-is-flink/roadmap/index.html b/content/what-is-flink/roadmap/index.html
index 9906cee..17f3dae 100644
--- a/content/what-is-flink/roadmap/index.html
+++ b/content/what-is-flink/roadmap/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/what-is-flink/roadmap/" title="开发计划">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/what-is-flink/security/index.html b/content/what-is-flink/security/index.html
index 0fac456..576c3a8 100644
--- a/content/what-is-flink/security/index.html
+++ b/content/what-is-flink/security/index.html
@@ -30,7 +30,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/what-is-flink/security/" title="Security">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/what-is-flink/special-thanks/index.html b/content/what-is-flink/special-thanks/index.html
index 2bb83d6..dcfa3c1 100644
--- a/content/what-is-flink/special-thanks/index.html
+++ b/content/what-is-flink/special-thanks/index.html
@@ -40,7 +40,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/what-is-flink/special-thanks/" title="特殊致谢">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -294,7 +294,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/what-is-flink/use-cases/index.html b/content/what-is-flink/use-cases/index.html
index 6f79f9a..9f9f9b9 100644
--- a/content/what-is-flink/use-cases/index.html
+++ b/content/what-is-flink/use-cases/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/what-is-flink/use-cases/" title="应用场景">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/what-is-stateful-functions/index.html b/content/what-is-stateful-functions/index.html
index 81052ca..bdbcc8c 100644
--- a/content/what-is-stateful-functions/index.html
+++ b/content/what-is-stateful-functions/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/what-is-stateful-functions/" title="What is Stateful Functions?">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/what-is-the-flink-kubernetes-operator/index.html b/content/what-is-the-flink-kubernetes-operator/index.html
index ad95f4d..01d7913 100644
--- a/content/what-is-the-flink-kubernetes-operator/index.html
+++ b/content/what-is-the-flink-kubernetes-operator/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="zh" href="https://flink.apache.org/zh/what-is-the-flink-kubernetes-operator/" title="What is the Flink Kubernetes Operator?">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/en.search.min.67ad45bac38c3fd100e53cf815b136b4d09a92e8b71c363e2937a3ac5b8bc82e.js" integrity="sha256-Z61FusOMP9EA5Tz4FbE2tNCakui3HDY&#43;KTejrFuLyC4="></script>
+<script defer src="/en.search.min.b58d961779f91cae8414117efac138dcbed605c935bfb22393047cf18fc734bd.js" integrity="sha256-tY2WF3n5HK6EFBF&#43;&#43;sE43L7WBck1v7IjkwR88Y/HNL0="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh.search-data.min.a53a1d5f39df3a7b6193a82c102bf76ca2ef8af7a10e97686f0ab442b4235f38.js b/content/zh.search-data.min.63f3cbc369ffa409cc7e1099cf5894af0626fa9074ece8aefbb7280436090376.js
similarity index 97%
rename from content/zh.search-data.min.a53a1d5f39df3a7b6193a82c102bf76ca2ef8af7a10e97686f0ab442b4235f38.js
rename to content/zh.search-data.min.63f3cbc369ffa409cc7e1099cf5894af0626fa9074ece8aefbb7280436090376.js
index 744464b..fee27b9 100644
--- a/content/zh.search-data.min.a53a1d5f39df3a7b6193a82c102bf76ca2ef8af7a10e97686f0ab442b4235f38.js
+++ b/content/zh.search-data.min.63f3cbc369ffa409cc7e1099cf5894af0626fa9074ece8aefbb7280436090376.js
@@ -184,7 +184,7 @@
 As of March 2023, the Flink community decided that upon release of a new Flink minor version, the community will perform one final bugfix release for resolved critical/blocker issues in the Flink minor version losing support. If 1.16.1 is the current release and 1.15.4 is the latest previous patch version, once 1.17.0 is released we will create a 1.15.5 to flush out any resolved critical/blocker issues.
 请注意,社区始终愿意讨论旧版本的 bugfix 版本。请在 dev@flink.apache.org 邮件列表中与开发人员联系。
 所有稳定版本 # 所有的 Flink 版本均可通过 https://archive.apache.org/dist/flink/ 获得,包括校验和加密签名。在撰写本文时,这包括以下版本:
-Apache Flink # Apache Flink 2.1.0 - 2025-07-31 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 2.0.0 - 2025-03-24 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.20.2 - 2025-07-10 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.20.1 - 2025-02-12 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.20.0 - 2024-08-02 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.19.3 - 2025-07-10 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.19.2 - 2025-02-12 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.19.1 - 2024-06-14 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.19.0 - 2024-03-18 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.18.1 - 2024-01-19 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.18.0 - 2023-10-25 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.17.2 - 2023-11-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.17.1 - 2023-05-25 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.17.0 - 2023-03-23 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.16.3 - 2023-11-20 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.16.2 - 2023-05-25 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.16.1 - 2023-01-30 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.16.0 - 2022-10-28 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.15.4 - 2023-03-15 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.15.3 - 2022-11-10 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.15.2 - 2022-08-24 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.15.1 - 2022-07-06 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.15.0 - 2022-05-05 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.14.6 - 2022-09-28 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.14.5 - 2022-06-22 (Source, Binaries, Docs, 
Javadocs, Scaladocs ) Apache Flink 1.14.4 - 2022-03-02 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.14.3 - 2022-01-17 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.14.2 - 2021-12-16 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.14.0 - 2021-09-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.6 - 2022-02-18 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.5 - 2021-12-16 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.3 - 2021-10-19 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.2 - 2021-08-02 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.1 - 2021-05-28 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.0 - 2021-04-30 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.7 - 2021-12-16 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.5 - 2021-08-06 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.4 - 2021-05-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.3 - 2021-04-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.2 - 2021-03-03 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.1 - 2021-01-19 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.0 - 2020-12-08 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.6 - 2021-12-16 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.4 - 2021-08-09 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.3 - 2020-12-18 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.2 - 2020-09-17 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.1 - 2020-07-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.0 - 2020-07-06 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.10.3 - 2021-01-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.10.2 - 2020-08-25 (Source, Binaries, Docs, 
Javadocs, Scaladocs ) Apache Flink 1.10.1 - 2020-05-12 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.10.0 - 2020-02-11 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.9.3 - 2020-04-24 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.9.2 - 2020-01-30 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.9.1 - 2019-10-18 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.9.0 - 2019-08-22 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.8.3 - 2019-12-11 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.8.2 - 2019-09-11 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.8.1 - 2019-07-02 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.8.0 - 2019-04-09 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.7.2 - 2019-02-15 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.7.1 - 2018-12-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.7.0 - 2018-11-30 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.6.4 - 2019-02-25 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.6.3 - 2018-12-22 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.6.2 - 2018-10-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.6.1 - 2018-09-19 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.6.0 - 2018-08-08 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.6 - 2018-12-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.5 - 2018-10-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.4 - 2018-09-19 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.3 - 2018-08-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.2 - 2018-07-31 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.1 - 2018-07-12 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.0 - 2018-05-25 (Source, Binaries, Docs, Javadocs, Scaladocs ) 
Apache Flink 1.4.2 - 2018-03-08 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.4.1 - 2018-02-15 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.4.0 - 2017-11-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.3.3 - 2018-03-15 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.3.2 - 2017-08-05 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.3.1 - 2017-06-23 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.3.0 - 2017-06-01 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.2.1 - 2017-04-26 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.2.0 - 2017-02-06 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.5 - 2017-03-22 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.4 - 2016-12-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.3 - 2016-10-13 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.2 - 2016-09-05 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.1 - 2016-08-11 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.0 - 2016-08-08 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.0.3 - 2016-05-12 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.0.2 - 2016-04-23 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.0.1 - 2016-04-06 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.0.0 - 2016-03-08 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 0.10.2 - 2016-02-11 (Source, Binaries) Apache Flink 0.10.1 - 2015-11-27 (Source, Binaries) Apache Flink 0.10.0 - 2015-11-16 (Source, Binaries) Apache Flink 0.9.1 - 2015-09-01 (Source, Binaries) Apache Flink 0.9.0 - 2015-06-24 (Source, Binaries) Apache Flink 0.9.0-milestone-1 - 2015-04-13 (Source, Binaries) Apache Flink 0.8.1 - 2015-02-20 (Source, Binaries) Apache Flink 0.8.0 - 2015-01-22 (Source, Binaries) Apache Flink 0.7.0-incubating - 2014-11-04 (Source, Binaries) Apache Flink 
0.6.1-incubating - 2014-09-26 (Source, Binaries) Apache Flink 0.6-incubating - 2014-08-26 (Source, Binaries) Apache Flink connectors # Flink Elasticsearch Connector 3.0.0 - 2022-11-09 (Source) Flink AWS Connectors 3.0.0 - 2022-11-28 (Source) Flink Cassandra Connector 3.0.0 - 2022-11-30 (Source) Flink AWS Connectors 4.0.0 - 2022-12-09 (Source) Flink Pulsar Connector 3.0.0 - 2022-12-20 (Source) Flink JDBC Connector 3.0.0 - 2022-11-30 (Source) Flink RabbitMQ Connectors 3.0.0 - 2022-12-13 (Source) Flink Opensearch Connector 1.0.0 - 2022-12-21 (Source) Flink Google Cloud PubSub Connector 3.0.0 - 2023-01-31 (Source) Flink MongoDB Connector 1.0.0 - 2023-04-03 (Source) Flink AWS Connectors 4.1.0 - 2023-04-03 (Source) Flink Kafka Connector 3.0.0 - 2023-04-21 (Source) Flink MongoDB Connector 1.0.1 - 2023-04-24 (Source) Flink JDBC Connector 3.1.0 - 2023-05-05 (Source) Flink RabbitMQ Connectors 3.0.1 - 2023-05-08 (Source) Flink Elasticsearch Connector 3.0.1 - 2023-05-08 (Source) Flink Opensearch Connector 1.0.1 - 2023-05-08 (Source) Flink Pulsar Connector 4.0.0 - 2023-05-08 (Source) Flink Google Cloud PubSub Connector 3.0.1 - 2023-05-09 (Source) Flink Cassandra Connector 3.1.0 - 2023-05-25 (Source) Flink Pulsar Connector 3.0.1 - 2023-06-07 (Source) Flink JDBC Connector 3.1.1 - 2023-06-28 (Source) Flink MongoDB Connector 1.0.2 - 2023-08-15 (Source) Flink HBase Connector 3.0.0 - 2023-09-1 (Source) Flink Kafka Connector 3.0.1 - 2023-10-30 (Source) Flink AWS Connectors 4.2.0 - 2023-11-30 (Source) Flink Kafka Connector 3.0.2 - 2023-12-01 (Source) Flink Pulsar Connector 4.1.0 - 2023-12-28 (Source) Flink Google Cloud PubSub Connector 3.0.2 - 2024-01-12 (Source) Flink Opensearch Connector 1.1.0 - 2024-02-01 (Source) Flink Kafka Connector 3.1.0 - 2024-02-07 (Source) Flink MongoDB Connector 1.1.0 - 2024-02-19 (Source) Flink JDBC Connector 3.1.2 - 2024-02-21 (Source) Flink MongoDB Connector 1.2.0 - 2024-06-06 (Source) Flink Google Cloud PubSub Connector 3.1.0 - 2024-06-07 (Source) Flink 
AWS Connectors 4.3.0 - 2024-06-07 (Source) Flink Cassandra Connector 3.2.0 - 2024-06-07 (Source) Flink JDBC Connector 3.2.0 - 2024-06-07 (Source) Flink Kafka Connector 3.2.0 - 2024-06-07 (Source) Flink Opensearch Connector 2.0.0 - 2024-06-11 (Source) Flink Opensearch Connector 1.2.0 - 2024-06-11 (Source) Flink Kafka Connector 3.3.0 - 2024-10-17 (Source) Flink Prometheus Connector 1.0.0 - 2024-11-08 (Source) Flink AWS Connectors 5.0.0 - 2024-11-11 (Source) Flink Kafka Connector 3.4.0 - 2024-11-25 (Source) Flink HBase Connector 4.0.0 - 2024-11-26 (Source) Flink Hive Connector 3.0.0 - 2025-02-10 (Source) Flink MongoDB Connector 2.0.0 - 2025-03-17 (Source) Flink Kudu Connector 2.0.0 - 2025-04-14 (Source) Flink Elasticsearch Connector 3.1.0 - 2025-04-15 (Source) Flink Elasticsearch Connector 4.0.0 - 2025-04-17 (Source) Flink JDBC Connector 3.3.0 - 2025-04-22 (Source) Flink JDBC Connector 4.0.0 - 2025-04-22 (Source) Flink Kafka Connector 4.0.0 - 2025-04-24 (Source) Apache Flink CDC # Apache Flink CDC 3.4.0 - 2025-05-16 (Source, Binaries) Apache Flink CDC 3.3.0 - 2025-01-21 (Source, Binaries) Apache Flink CDC 3.2.1 - 2024-11-27 (Source, Binaries) Apache Flink CDC 3.2.0 - 2024-09-05 (Source, Binaries) Apache Flink CDC 3.1.1 - 2024-06-18 (Source, Binaries) Apache Flink CDC 3.1.0 - 2024-05-17 (Source, Binaries) Apache Flink Stateful Functions # Apache Flink Stateful Functions 3.3.0 - 2023-09-19 (Source, Docs, Javadocs) Apache Flink Stateful Functions 3.2.0 - 2022-01-27 (Source, Docs, Javadocs) Apache Flink Stateful Functions 3.1.1 - 2021-12-22 (Source, Docs, Javadocs) Apache Flink Stateful Functions 3.1.0 - 2021-08-30 (Source, Docs, Javadocs) Apache Flink Stateful Functions 3.0.0 - 2021-04-14 (Source, Docs, Javadocs) Apache Flink Stateful Functions 2.2.2 - 2021-01-02 (Source, Docs, Javadocs) Apache Flink Stateful Functions 2.2.1 - 2020-11-09 (Source, Docs, Javadocs) Apache Flink Stateful Functions 2.2.0 - 2020-09-28 (Source, Docs, Javadocs) Apache Flink Stateful Functions 
2.1.0 - 2020-06-08 (Source, Docs, Javadocs) Apache Flink Stateful Functions 2.0.0 - 2020-04-02 (Source, Docs, Javadocs) Apache Flink Shaded # Apache Flink Shaded 20.0 - 2025-02-24 (Source) Apache Flink Shaded 19.0 - 2024-07-03 (Source) Apache Flink Shaded 18.0 - 2024-01-11 (Source) Apache Flink Shaded 17.0 - 2023-05-08 (Source) Apache Flink Shaded 16.2 - 2023-11-17 (Source) Apache Flink Shaded 16.1 - 2022-11-24 (Source) Apache Flink Shaded 16.0 - 2022-10-07 (Source) Apache Flink Shaded 15.0 - 2022-01-21 (Source) Apache Flink Shaded 14.0 - 2021-07-21 (Source) Apache Flink Shaded 13.0 - 2021-04-06 (Source) Apache Flink Shaded 12.0 - 2020-10-09 (Source) Apache Flink Shaded 11.0 - 2020-05-29 (Source) Apache Flink Shaded 10.0 - 2020-02-17 (Source) Apache Flink Shaded 9.0 - 2019-11-23 (Source) Apache Flink Shaded 8.0 - 2019-08-28 (Source) Apache Flink Shaded 7.0 - 2019-05-30 (Source) Apache Flink Shaded 6.0 - 2019-02-12 (Source) Apache Flink Shaded 5.0 - 2018-10-15 (Source) Apache Flink Shaded 4.0 - 2018-06-06 (Source) Apache Flink Shaded 3.0 - 2018-02-28 (Source) Apache Flink Shaded 2.0 - 2017-10-30 (Source) Apache Flink Shaded 1.0 - 2017-07-27 (Source) Apache Flink ML # Apache Flink ML 2.3.0 - 2023-07-01 (Source) Apache Flink ML 2.2.0 - 2023-04-19 (Source) Apache Flink ML 2.1.0 - 2022-07-12 (Source) Apache Flink ML 2.0.0 - 2021-01-07 (Source) Apache Flink Kubernetes Operator # Apache Flink Kubernetes Operator 1.12.1 - 2025-07-08 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.12.0 - 2025-05-28 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.11.0 - 2025-03-03 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.10.0 - 2024-10-25 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.9.0 - 2024-07-02 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.8.0 - 2024-03-21 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.7.0 - 2023-11-22 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.6.1 - 2023-10-27 (Source, Helm Chart) Apache Flink 
Kubernetes Operator 1.6.0 - 2023-08-15 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.5.0 - 2023-05-17 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.4.0 - 2023-02-22 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.3.1 - 2023-01-10 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.3.0 - 2022-12-14 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.2.0 - 2022-10-05 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.1.0 - 2022-07-25 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.0.1 - 2022-06-27 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.0.0 - 2022-06-04 (Source, Helm Chart) Apache Flink Kubernetes Operator 0.1.0 - 2022-04-02 (Source, Helm Chart) Apache Flink Table Store # Apache Flink Table Store 0.3.0 - 2023-01-13 (Source, Binaries) Apache Flink Table Store 0.2.0 - 2022-08-29 (Source, Binaries) Apache Flink Table Store 0.1.0 - 2022-05-11 (Source, Binaries) `}),e.add({id:13,href:"/zh/what-is-flink/powered-by/",title:"Flink 用户",section:"About",content:` Powered By Flink # Apache Flink 为全球许多公司和企业的关键业务提供支持。在这个页面上,我们展示了一些著名的 Flink 用户,他们在生产中运行着有意思的用例,并提供了展示更详细信息的链接。
+Apache Flink # Apache Flink 2.1.0 - 2025-07-31 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 2.0.0 - 2025-03-24 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.20.2 - 2025-07-10 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.20.1 - 2025-02-12 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.20.0 - 2024-08-02 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.19.3 - 2025-07-10 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.19.2 - 2025-02-12 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.19.1 - 2024-06-14 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.19.0 - 2024-03-18 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.18.1 - 2024-01-19 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.18.0 - 2023-10-25 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.17.2 - 2023-11-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.17.1 - 2023-05-25 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.17.0 - 2023-03-23 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.16.3 - 2023-11-20 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.16.2 - 2023-05-25 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.16.1 - 2023-01-30 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.16.0 - 2022-10-28 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.15.4 - 2023-03-15 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.15.3 - 2022-11-10 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.15.2 - 2022-08-24 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.15.1 - 2022-07-06 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.15.0 - 2022-05-05 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.14.6 - 2022-09-28 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.14.5 - 2022-06-22 (Source, Binaries, Docs, 
Javadocs, Scaladocs ) Apache Flink 1.14.4 - 2022-03-02 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.14.3 - 2022-01-17 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.14.2 - 2021-12-16 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.14.0 - 2021-09-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.6 - 2022-02-18 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.5 - 2021-12-16 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.3 - 2021-10-19 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.2 - 2021-08-02 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.1 - 2021-05-28 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.13.0 - 2021-04-30 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.7 - 2021-12-16 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.5 - 2021-08-06 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.4 - 2021-05-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.3 - 2021-04-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.2 - 2021-03-03 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.1 - 2021-01-19 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.12.0 - 2020-12-08 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.6 - 2021-12-16 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.4 - 2021-08-09 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.3 - 2020-12-18 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.2 - 2020-09-17 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.1 - 2020-07-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.11.0 - 2020-07-06 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.10.3 - 2021-01-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.10.2 - 2020-08-25 (Source, Binaries, Docs, 
Javadocs, Scaladocs ) Apache Flink 1.10.1 - 2020-05-12 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.10.0 - 2020-02-11 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.9.3 - 2020-04-24 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.9.2 - 2020-01-30 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.9.1 - 2019-10-18 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.9.0 - 2019-08-22 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.8.3 - 2019-12-11 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.8.2 - 2019-09-11 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.8.1 - 2019-07-02 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.8.0 - 2019-04-09 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.7.2 - 2019-02-15 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.7.1 - 2018-12-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.7.0 - 2018-11-30 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.6.4 - 2019-02-25 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.6.3 - 2018-12-22 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.6.2 - 2018-10-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.6.1 - 2018-09-19 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.6.0 - 2018-08-08 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.6 - 2018-12-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.5 - 2018-10-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.4 - 2018-09-19 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.3 - 2018-08-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.2 - 2018-07-31 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.1 - 2018-07-12 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.5.0 - 2018-05-25 (Source, Binaries, Docs, Javadocs, Scaladocs ) 
Apache Flink 1.4.2 - 2018-03-08 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.4.1 - 2018-02-15 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.4.0 - 2017-11-29 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.3.3 - 2018-03-15 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.3.2 - 2017-08-05 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.3.1 - 2017-06-23 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.3.0 - 2017-06-01 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.2.1 - 2017-04-26 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.2.0 - 2017-02-06 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.5 - 2017-03-22 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.4 - 2016-12-21 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.3 - 2016-10-13 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.2 - 2016-09-05 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.1 - 2016-08-11 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.1.0 - 2016-08-08 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.0.3 - 2016-05-12 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.0.2 - 2016-04-23 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.0.1 - 2016-04-06 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 1.0.0 - 2016-03-08 (Source, Binaries, Docs, Javadocs, Scaladocs ) Apache Flink 0.10.2 - 2016-02-11 (Source, Binaries) Apache Flink 0.10.1 - 2015-11-27 (Source, Binaries) Apache Flink 0.10.0 - 2015-11-16 (Source, Binaries) Apache Flink 0.9.1 - 2015-09-01 (Source, Binaries) Apache Flink 0.9.0 - 2015-06-24 (Source, Binaries) Apache Flink 0.9.0-milestone-1 - 2015-04-13 (Source, Binaries) Apache Flink 0.8.1 - 2015-02-20 (Source, Binaries) Apache Flink 0.8.0 - 2015-01-22 (Source, Binaries) Apache Flink 0.7.0-incubating - 2014-11-04 (Source, Binaries) Apache Flink 
0.6.1-incubating - 2014-09-26 (Source, Binaries) Apache Flink 0.6-incubating - 2014-08-26 (Source, Binaries) Apache Flink connectors # Flink Elasticsearch Connector 3.0.0 - 2022-11-09 (Source) Flink AWS Connectors 3.0.0 - 2022-11-28 (Source) Flink Cassandra Connector 3.0.0 - 2022-11-30 (Source) Flink AWS Connectors 4.0.0 - 2022-12-09 (Source) Flink Pulsar Connector 3.0.0 - 2022-12-20 (Source) Flink JDBC Connector 3.0.0 - 2022-11-30 (Source) Flink RabbitMQ Connectors 3.0.0 - 2022-12-13 (Source) Flink Opensearch Connector 1.0.0 - 2022-12-21 (Source) Flink Google Cloud PubSub Connector 3.0.0 - 2023-01-31 (Source) Flink MongoDB Connector 1.0.0 - 2023-04-03 (Source) Flink AWS Connectors 4.1.0 - 2023-04-03 (Source) Flink Kafka Connector 3.0.0 - 2023-04-21 (Source) Flink MongoDB Connector 1.0.1 - 2023-04-24 (Source) Flink JDBC Connector 3.1.0 - 2023-05-05 (Source) Flink RabbitMQ Connectors 3.0.1 - 2023-05-08 (Source) Flink Elasticsearch Connector 3.0.1 - 2023-05-08 (Source) Flink Opensearch Connector 1.0.1 - 2023-05-08 (Source) Flink Pulsar Connector 4.0.0 - 2023-05-08 (Source) Flink Google Cloud PubSub Connector 3.0.1 - 2023-05-09 (Source) Flink Cassandra Connector 3.1.0 - 2023-05-25 (Source) Flink Pulsar Connector 3.0.1 - 2023-06-07 (Source) Flink JDBC Connector 3.1.1 - 2023-06-28 (Source) Flink MongoDB Connector 1.0.2 - 2023-08-15 (Source) Flink HBase Connector 3.0.0 - 2023-09-1 (Source) Flink Kafka Connector 3.0.1 - 2023-10-30 (Source) Flink AWS Connectors 4.2.0 - 2023-11-30 (Source) Flink Kafka Connector 3.0.2 - 2023-12-01 (Source) Flink Pulsar Connector 4.1.0 - 2023-12-28 (Source) Flink Google Cloud PubSub Connector 3.0.2 - 2024-01-12 (Source) Flink Opensearch Connector 1.1.0 - 2024-02-01 (Source) Flink Kafka Connector 3.1.0 - 2024-02-07 (Source) Flink MongoDB Connector 1.1.0 - 2024-02-19 (Source) Flink JDBC Connector 3.1.2 - 2024-02-21 (Source) Flink MongoDB Connector 1.2.0 - 2024-06-06 (Source) Flink Google Cloud PubSub Connector 3.1.0 - 2024-06-07 (Source) Flink 
AWS Connectors 4.3.0 - 2024-06-07 (Source) Flink Cassandra Connector 3.2.0 - 2024-06-07 (Source) Flink JDBC Connector 3.2.0 - 2024-06-07 (Source) Flink Kafka Connector 3.2.0 - 2024-06-07 (Source) Flink Opensearch Connector 2.0.0 - 2024-06-11 (Source) Flink Opensearch Connector 1.2.0 - 2024-06-11 (Source) Flink Kafka Connector 3.3.0 - 2024-10-17 (Source) Flink Prometheus Connector 1.0.0 - 2024-11-08 (Source) Flink AWS Connectors 5.0.0 - 2024-11-11 (Source) Flink Kafka Connector 3.4.0 - 2024-11-25 (Source) Flink HBase Connector 4.0.0 - 2024-11-26 (Source) Flink Hive Connector 3.0.0 - 2025-02-10 (Source) Flink MongoDB Connector 2.0.0 - 2025-03-17 (Source) Flink Kudu Connector 2.0.0 - 2025-04-14 (Source) Flink Elasticsearch Connector 3.1.0 - 2025-04-15 (Source) Flink Elasticsearch Connector 4.0.0 - 2025-04-17 (Source) Flink JDBC Connector 3.3.0 - 2025-04-22 (Source) Flink JDBC Connector 4.0.0 - 2025-04-22 (Source) Flink Kafka Connector 4.0.0 - 2025-04-24 (Source) Apache Flink CDC # Apache Flink CDC 3.5.0 - 2025-09-26 (Source, Binaries) Apache Flink CDC 3.4.0 - 2025-05-16 (Source, Binaries) Apache Flink CDC 3.3.0 - 2025-01-21 (Source, Binaries) Apache Flink CDC 3.2.1 - 2024-11-27 (Source, Binaries) Apache Flink CDC 3.2.0 - 2024-09-05 (Source, Binaries) Apache Flink CDC 3.1.1 - 2024-06-18 (Source, Binaries) Apache Flink CDC 3.1.0 - 2024-05-17 (Source, Binaries) Apache Flink Stateful Functions # Apache Flink Stateful Functions 3.3.0 - 2023-09-19 (Source, Docs, Javadocs) Apache Flink Stateful Functions 3.2.0 - 2022-01-27 (Source, Docs, Javadocs) Apache Flink Stateful Functions 3.1.1 - 2021-12-22 (Source, Docs, Javadocs) Apache Flink Stateful Functions 3.1.0 - 2021-08-30 (Source, Docs, Javadocs) Apache Flink Stateful Functions 3.0.0 - 2021-04-14 (Source, Docs, Javadocs) Apache Flink Stateful Functions 2.2.2 - 2021-01-02 (Source, Docs, Javadocs) Apache Flink Stateful Functions 2.2.1 - 2020-11-09 (Source, Docs, Javadocs) Apache Flink Stateful Functions 2.2.0 - 2020-09-28 
(Source, Docs, Javadocs) Apache Flink Stateful Functions 2.1.0 - 2020-06-08 (Source, Docs, Javadocs) Apache Flink Stateful Functions 2.0.0 - 2020-04-02 (Source, Docs, Javadocs) Apache Flink Shaded # Apache Flink Shaded 20.0 - 2025-02-24 (Source) Apache Flink Shaded 19.0 - 2024-07-03 (Source) Apache Flink Shaded 18.0 - 2024-01-11 (Source) Apache Flink Shaded 17.0 - 2023-05-08 (Source) Apache Flink Shaded 16.2 - 2023-11-17 (Source) Apache Flink Shaded 16.1 - 2022-11-24 (Source) Apache Flink Shaded 16.0 - 2022-10-07 (Source) Apache Flink Shaded 15.0 - 2022-01-21 (Source) Apache Flink Shaded 14.0 - 2021-07-21 (Source) Apache Flink Shaded 13.0 - 2021-04-06 (Source) Apache Flink Shaded 12.0 - 2020-10-09 (Source) Apache Flink Shaded 11.0 - 2020-05-29 (Source) Apache Flink Shaded 10.0 - 2020-02-17 (Source) Apache Flink Shaded 9.0 - 2019-11-23 (Source) Apache Flink Shaded 8.0 - 2019-08-28 (Source) Apache Flink Shaded 7.0 - 2019-05-30 (Source) Apache Flink Shaded 6.0 - 2019-02-12 (Source) Apache Flink Shaded 5.0 - 2018-10-15 (Source) Apache Flink Shaded 4.0 - 2018-06-06 (Source) Apache Flink Shaded 3.0 - 2018-02-28 (Source) Apache Flink Shaded 2.0 - 2017-10-30 (Source) Apache Flink Shaded 1.0 - 2017-07-27 (Source) Apache Flink ML # Apache Flink ML 2.3.0 - 2023-07-01 (Source) Apache Flink ML 2.2.0 - 2023-04-19 (Source) Apache Flink ML 2.1.0 - 2022-07-12 (Source) Apache Flink ML 2.0.0 - 2021-01-07 (Source) Apache Flink Kubernetes Operator # Apache Flink Kubernetes Operator 1.12.1 - 2025-07-08 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.12.0 - 2025-05-28 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.11.0 - 2025-03-03 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.10.0 - 2024-10-25 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.9.0 - 2024-07-02 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.8.0 - 2024-03-21 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.7.0 - 2023-11-22 (Source, Helm Chart) Apache Flink Kubernetes 
Operator 1.6.1 - 2023-10-27 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.6.0 - 2023-08-15 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.5.0 - 2023-05-17 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.4.0 - 2023-02-22 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.3.1 - 2023-01-10 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.3.0 - 2022-12-14 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.2.0 - 2022-10-05 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.1.0 - 2022-07-25 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.0.1 - 2022-06-27 (Source, Helm Chart) Apache Flink Kubernetes Operator 1.0.0 - 2022-06-04 (Source, Helm Chart) Apache Flink Kubernetes Operator 0.1.0 - 2022-04-02 (Source, Helm Chart) Apache Flink Table Store # Apache Flink Table Store 0.3.0 - 2023-01-13 (Source, Binaries) Apache Flink Table Store 0.2.0 - 2022-08-29 (Source, Binaries) Apache Flink Table Store 0.1.0 - 2022-05-11 (Source, Binaries) `}),e.add({id:13,href:"/zh/what-is-flink/powered-by/",title:"Flink 用户",section:"About",content:` Powered By Flink # Apache Flink 为全球许多公司和企业的关键业务提供支持。在这个页面上,我们展示了一些著名的 Flink 用户,他们在生产中运行着有意思的用例,并提供了展示更详细信息的链接。
 在项目的 wiki 页面中有一个 谁在使用 Flink 的页面,展示了更多的 Flink 用户。请注意,该列表并不全面。我们只添加明确要求列出的用户。
 如果你希望加入此页面,请通过 Flink 用户邮件列表 告诉我们。
 全球最大的零售商阿里巴巴(Alibaba)使用 Flink 的分支版本 Blink 来优化实时搜索排名。 阅读更多有关 Flink 在阿里巴巴扮演角色的信息 Amazon Managed Service for Apache Flink 是一项完全托管的 Amazon 服务;可以让您能够使用Apache Flink来处理和分析流数据。 BetterCloud 是一个多 SaaS 管理平台,它使用 Flink 从 SaaS 应用程序活动中获取近乎实时的智能。 请参阅 BetterCloud 在 Flink Forward SF 2017 上的分享 Bouygues Telecom 正在运行由 Flink 提供支持的 30 个生产应用程序,每天处理 100 亿个原始事件。 请参阅 Bouygues Telecom 在 Flink Forward 2016 上的分享 财富 500 强金融服务公司 Capital One 使用 Flink 进行实时活动监控和报警。 了解 Capital One 的欺诈检测用例 康卡斯特(Comcast)是一家全球媒体和技术公司,它使用 Flink 来实现机器学习模型和近实时事件流处理。 了解 Flink 在康卡斯特的应用 Criteo 是开放互联网的广告平台,使用 Flink 进行实时收入监控和近实时事件处理。 了解 Criteo 的 Flink 用例 滴滴出行是全球卓越的移动出行平台,使用 Apache Flink支持了实时监控、实时特征抽取、实时ETL等业务。 了解滴滴如何使用 Flink 的。 Drivetribe是由前“Top Gear”主持人创建的数字社区,它使用 Flink 作为指标和内容推荐。 了解 Flink 在 Drivetribe stack 的应用 Ebay 的监控平台由 Flink 提供支持,可在指标和日志流上计算上千条自定义报警规则。 了解更多 Flink 在 Ebay 的信息 爱立信使用 Flink 构建了一个实时异常检测器,通过大型基础设施进行机器学习。 阅读关于O&rsquo;Reilly想法的详细概述 Gojek 是一个超级 App: 拥有超过20种服务,并使用 Flink 为其自助平台提供支持,从而实现跨功能的数据驱动决策。 更多信息请访问 Gojek 工程师博客 华为是全球领先的 ICT 基础设施和智能设备供应商。华为云提供基于 Flink 的云服务。 了解Flink 如何为云服务提供动力 King,Candy Crush Saga的创建者,使用 Flink 为数据科学团队提供实时分析仪表板。 阅读 King 的 Flink 实现 Klaviyo使用 Apache Flink 扩展其实时分析系统,该系统每秒对超过一百万个事件进行重复数据删除和聚合。 阅读 Klaviyo 的实时分析 快手是中国领先的短视频分享 App,使用了 Apache Flink 搭建了一个实时监控平台,监控短视频和直播的质量。 阅读 Flink 在快手的应用实践 Lyft 使用 Flink 作为其流媒体平台的处理引擎,例如为机器学习持续生成特征。 阅读更多关于 Lyft 的流媒体 MediaMath 是一家程序化营销公司,它使用 Flink 为其实时报告基础架构提供支持。 请参阅 MediaMath 在 Flink Forward SF 2017 上的分享 Mux 是一家流媒体视频提供商的分析公司,它使用 Flink 进行实时异常检测和报警。 详细了解 Mux 如何使用 Flink OPPO, 作为中国最大的手机厂商之一,利用 Apache Flink 构建了实时数据仓库,用于即时分析运营活动效果及用户短期兴趣。 了解 OPPO 如何使用 Flink 全球第二大在线零售商奥托集团(Otto Group)使用 Flink 进行商业智能流处理。 请参阅 Otto 在 Flink Forward 2016 上的分享 OVH 使用 Flink 开发面向流的应用程序,比如实时商业智能系统或警报系统。 详细了解 OVH 如何使用 Flink Pinterest 使用基于 Apache Flink 的实时实验分析平台每天进行上千次的实验。 阅读更多在 Pinterest 有关实时实验分析的信息 Razorpay 是印度最大的支付门户网站之一,使用 Flink 构建了自己的内部平台 Mitra,用以扩展 AI 特征生成和实时模型服务。 阅读更多在 Razorpay 有关 Flink 数据分析的信息 ResearchGate 是科学家的社交网络,它使用 Flink 进行网络分析和近似重复检测。 请参阅 ResearchGate 在 Flink Forward 2016 上的分享 三星(SK 
telecom)是韩国最大的无线运营商。它在很多应用中使用了 Flink,包括智能工厂和移动应用程序。 了解其中一个 SK telecom 的使用案例。 Telefónica NEXT 的 TÜV 认证数据匿名平台由 Flink 提供支持。 了解更多关于 Telefónica NEXT 的信息 作为最大的互联网公司之一,腾讯利用 Apache Flink 构建了一个内部平台,以提高开发和操作实时应用程序的效率。 阅读有关腾讯平台的更多信息。 Uber 在 Apache Flink 上构建了基于 SQL 的开源流媒体分析平台 AthenaX。 更多信息请访问Uber工程博客 Vip,中国最大的品牌特卖网站之一,应用Flink实时的将数据流ETL到Hive中用于数据处理和分析. 详细了解Vip如何使用 Flink 小米,作为中国最大的专注于硬件与软件开发的公司之一,利用 Flink 构建了一个内部平台,以提高开发运维实时应用程序的效率,并用于实时推荐等场景。 详细了解小米如何使用 Flink 的。 Yelp 利用 Flink 为其数据连接器生态系统和流处理基础架构提供支持。 请参阅 Flink Forward 上的演讲 Zalando 是欧洲最大的电子商务公司之一,它使用 Flink 进行实时过程监控和 ETL。 更多信息请访问 Zalando 技术博客 `}),e.add({id:14,href:"/zh/documentation/flink-kubernetes-operator-master/",title:"Kubernetes Operator Main (snapshot)",section:"Documentation",content:" Flink Kubernetes Operator documentation (latest snapshot) # You can find the Flink Kubernetes Operator documentation for the latest snapshot here. "}),e.add({id:15,href:"/zh/getting-started/with-flink-stateful-functions/",title:"With Flink Stateful Functions",section:"教程",content:" Getting Started with Flink Stateful Functions # Read how you can get started with Flink Stateful Functions here. "}),e.add({id:16,href:"/zh/documentation/flink-cdc-stable/",title:"CDC $FlinkCDCStableShortVersion (stable)",section:"Documentation",content:" Flink CDC documentation (latest stable release) # You can find the Flink CDC documentation for the latest stable release here. "}),e.add({id:17,href:"/zh/getting-started/training-course/",title:"Training Course",section:"教程",content:" Training Course # Read all about the Flink Training Course here. "}),e.add({id:18,href:"/zh/what-is-flink/roadmap/",title:"开发计划",section:"About",content:` Roadmap # 导读: 此计划路线图旨在对Flink社区当前正在进行的项目进行总结摘要,并对这些项目根据工作内容进行分组。 鉴于Flink每个分组中现在都有非常多的工作正在进行,我们希望此计划书有助于用户和贡献者理解每个项目乃至于整个Flink的未来方向。 这个计划书既涵盖刚起步的项目,也包括接近完成的项目,这样可以使大家更好地了解各项目的发展方向以及当前的进展。
diff --git a/content/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js b/content/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js
similarity index 89%
copy from content/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js
copy to content/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js
index 8190c91..cf6316e 100644
--- a/content/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js
+++ b/content/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js
@@ -1 +1 @@
-"use strict";(function(){const e=document.querySelector("#book-search-input"),t=document.querySelector("#book-search-results");if(!e)return;e.addEventListener("focus",n),e.addEventListener("keyup",s),document.addEventListener("keypress",i);function i(t){if(e===document.activeElement)return;const n=String.fromCharCode(t.charCode);if(!a(n))return;e.focus(),t.preventDefault()}function a(t){const n=e.getAttribute("data-hotkeys")||"";return n.indexOf(t)>=0}function n(){e.removeEventListener("focus",n),e.required=!0,o("/flexsearch.min.js"),o("/zh.search-data.min.a53a1d5f39df3a7b6193a82c102bf76ca2ef8af7a10e97686f0ab442b4235f38.js",function(){e.required=!1,s()})}function s(){for(;t.firstChild;)t.removeChild(t.firstChild);if(!e.value)return;const n=window.bookSearchIndex.search(e.value,10);n.forEach(function(e){const n=r("<li><a href></a><small></small></li>"),s=n.querySelector("a"),o=n.querySelector("small");s.href=e.href,s.textContent=e.title,o.textContent=e.section,t.appendChild(n)})}function o(e,t){const n=document.createElement("script");n.defer=!0,n.async=!1,n.src=e,n.onload=t,document.head.appendChild(n)}function r(e){const t=document.createElement("div");return t.innerHTML=e,t.firstChild}})()
\ No newline at end of file
+"use strict";(function(){const e=document.querySelector("#book-search-input"),t=document.querySelector("#book-search-results");if(!e)return;e.addEventListener("focus",n),e.addEventListener("keyup",s),document.addEventListener("keypress",i);function i(t){if(e===document.activeElement)return;const n=String.fromCharCode(t.charCode);if(!a(n))return;e.focus(),t.preventDefault()}function a(t){const n=e.getAttribute("data-hotkeys")||"";return n.indexOf(t)>=0}function n(){e.removeEventListener("focus",n),e.required=!0,o("/flexsearch.min.js"),o("/zh.search-data.min.63f3cbc369ffa409cc7e1099cf5894af0626fa9074ece8aefbb7280436090376.js",function(){e.required=!1,s()})}function s(){for(;t.firstChild;)t.removeChild(t.firstChild);if(!e.value)return;const n=window.bookSearchIndex.search(e.value,10);n.forEach(function(e){const n=r("<li><a href></a><small></small></li>"),s=n.querySelector("a"),o=n.querySelector("small");s.href=e.href,s.textContent=e.title,o.textContent=e.section,t.appendChild(n)})}function o(e,t){const n=document.createElement("script");n.defer=!0,n.async=!1,n.src=e,n.onload=t,document.head.appendChild(n)}function r(e){const t=document.createElement("div");return t.innerHTML=e,t.firstChild}})()
\ No newline at end of file
diff --git a/content/zh/404.html b/content/zh/404.html
index 927d46e..b27c89d 100644
--- a/content/zh/404.html
+++ b/content/zh/404.html
@@ -15,7 +15,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/404.html" title="404 Page not found">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
diff --git a/content/zh/categories/index.html b/content/zh/categories/index.html
index 0e501de..23b86bf 100644
--- a/content/zh/categories/index.html
+++ b/content/zh/categories/index.html
@@ -26,7 +26,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/categories/" title="Categories">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/zh/categories/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -281,7 +281,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/documentation/flink-cdc-master/index.html b/content/zh/documentation/flink-cdc-master/index.html
index 3d4fdd4..9950a82 100644
--- a/content/zh/documentation/flink-cdc-master/index.html
+++ b/content/zh/documentation/flink-cdc-master/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/documentation/flink-cdc-master/" title="CDC Master (snapshot)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/documentation/flink-cdc-stable/index.html b/content/zh/documentation/flink-cdc-stable/index.html
index 5cbb6f7..efc44c3 100644
--- a/content/zh/documentation/flink-cdc-stable/index.html
+++ b/content/zh/documentation/flink-cdc-stable/index.html
@@ -22,13 +22,13 @@
 <meta property="og:url" content="https://flink.apache.org/zh/documentation/flink-cdc-stable/" /><meta property="article:section" content="documentation" />
 
 
-<title>CDC 3.4 (stable) | Apache Flink</title>
+<title>CDC 3.5 (stable) | Apache Flink</title>
 <link rel="manifest" href="/manifest.json">
 <link rel="icon" href="/favicon.png" type="image/x-icon">
-<link rel="alternate" hreflang="en" href="https://flink.apache.org/documentation/flink-cdc-stable/" title="CDC 3.4 (stable)">
+<link rel="alternate" hreflang="en" href="https://flink.apache.org/documentation/flink-cdc-stable/" title="CDC 3.5 (stable)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/documentation/flink-kubernetes-operator-master/index.html b/content/zh/documentation/flink-kubernetes-operator-master/index.html
index 4a3ac09..ae07f7c 100644
--- a/content/zh/documentation/flink-kubernetes-operator-master/index.html
+++ b/content/zh/documentation/flink-kubernetes-operator-master/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/documentation/flink-kubernetes-operator-master/" title="Kubernetes Operator Main (snapshot)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/documentation/flink-kubernetes-operator-stable/index.html b/content/zh/documentation/flink-kubernetes-operator-stable/index.html
index e5e8bb2..8161ad1 100644
--- a/content/zh/documentation/flink-kubernetes-operator-stable/index.html
+++ b/content/zh/documentation/flink-kubernetes-operator-stable/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/documentation/flink-kubernetes-operator-stable/" title="Kubernetes Operator 1.12 (latest)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/documentation/flink-lts/index.html b/content/zh/documentation/flink-lts/index.html
index fbf0fcb..43add28 100644
--- a/content/zh/documentation/flink-lts/index.html
+++ b/content/zh/documentation/flink-lts/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/documentation/flink-lts/" title="Flink 1.20 (LTS)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/documentation/flink-master/index.html b/content/zh/documentation/flink-master/index.html
index a9d4998..a1610c2 100644
--- a/content/zh/documentation/flink-master/index.html
+++ b/content/zh/documentation/flink-master/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/documentation/flink-master/" title="Flink Master (snapshot)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/documentation/flink-stable/index.html b/content/zh/documentation/flink-stable/index.html
index acd2b91..0251808 100644
--- a/content/zh/documentation/flink-stable/index.html
+++ b/content/zh/documentation/flink-stable/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/documentation/flink-stable/" title="Flink 2.1 (stable)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/documentation/flink-stateful-functions-master/index.html b/content/zh/documentation/flink-stateful-functions-master/index.html
index 9bededa..9f276d4 100644
--- a/content/zh/documentation/flink-stateful-functions-master/index.html
+++ b/content/zh/documentation/flink-stateful-functions-master/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/documentation/flink-stateful-functions-master/" title="Stateful Functions Master (snapshot)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/documentation/flink-stateful-functions-stable/index.html b/content/zh/documentation/flink-stateful-functions-stable/index.html
index 1245870..f99408d 100644
--- a/content/zh/documentation/flink-stateful-functions-stable/index.html
+++ b/content/zh/documentation/flink-stateful-functions-stable/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/documentation/flink-stateful-functions-stable/" title="Stateful Functions 3.3 (stable)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/documentation/flinkml-master/index.html b/content/zh/documentation/flinkml-master/index.html
index 808bd94..4fbe3cd 100644
--- a/content/zh/documentation/flinkml-master/index.html
+++ b/content/zh/documentation/flinkml-master/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/documentation/flinkml-master/" title="ML Master (snapshot)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/documentation/flinkml-stable/index.html b/content/zh/documentation/flinkml-stable/index.html
index f63f4dd..1c9f1fa 100644
--- a/content/zh/documentation/flinkml-stable/index.html
+++ b/content/zh/documentation/flinkml-stable/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/documentation/flinkml-stable/" title="ML 2.3 (stable)">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/documentation/index.html b/content/zh/documentation/index.html
index d6ca82d..541b055 100644
--- a/content/zh/documentation/index.html
+++ b/content/zh/documentation/index.html
@@ -26,7 +26,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/documentation/" title="Documentation">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/zh/documentation/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -281,7 +281,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/downloads/index.html b/content/zh/downloads/index.html
index 54f8a51..cddd7a7 100644
--- a/content/zh/downloads/index.html
+++ b/content/zh/downloads/index.html
@@ -42,7 +42,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/downloads/" title="Downloads">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -296,7 +296,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -1203,6 +1203,7 @@
   <a class="anchor" href="#apache-flink-cdc">#</a>
 </h3>
 <ul>
+<li>Apache Flink CDC 3.5.0 - 2025-09-26 (<a href="https://archive.apache.org/dist/flink/flink-cdc-3.5.0/flink-cdc-3.5.0-src.tgz">Source</a>, <a href="https://archive.apache.org/dist/flink/flink-cdc-3.5.0/flink-cdc-3.5.0-bin.tar.gz">Binaries</a>)</li>
 <li>Apache Flink CDC 3.4.0 - 2025-05-16 (<a href="https://archive.apache.org/dist/flink/flink-cdc-3.4.0/flink-cdc-3.4.0-src.tgz">Source</a>, <a href="https://archive.apache.org/dist/flink/flink-cdc-3.4.0/flink-cdc-3.4.0-bin.tar.gz">Binaries</a>)</li>
 <li>Apache Flink CDC 3.3.0 - 2025-01-21 (<a href="https://archive.apache.org/dist/flink/flink-cdc-3.3.0/flink-cdc-3.3.0-src.tgz">Source</a>, <a href="https://archive.apache.org/dist/flink/flink-cdc-3.3.0/flink-cdc-3.3.0-bin.tar.gz">Binaries</a>)</li>
 <li>Apache Flink CDC 3.2.1 - 2024-11-27 (<a href="https://archive.apache.org/dist/flink/flink-cdc-3.2.1/flink-cdc-3.2.1-src.tgz">Source</a>, <a href="https://archive.apache.org/dist/flink/flink-cdc-3.2.1/flink-cdc-3.2.1-bin.tar.gz">Binaries</a>)</li>
diff --git a/content/zh/flink-packages/index.html b/content/zh/flink-packages/index.html
index 797bfd7..2f79cd6 100644
--- a/content/zh/flink-packages/index.html
+++ b/content/zh/flink-packages/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/flink-packages/" title="flink-packages.org">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/getting-started/index.html b/content/zh/getting-started/index.html
index 4c91831..b8856a5 100644
--- a/content/zh/getting-started/index.html
+++ b/content/zh/getting-started/index.html
@@ -26,7 +26,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/getting-started/" title="Getting Started">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/zh/getting-started/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -281,7 +281,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/getting-started/training-course/index.html b/content/zh/getting-started/training-course/index.html
index f51dd9a..4107c52 100644
--- a/content/zh/getting-started/training-course/index.html
+++ b/content/zh/getting-started/training-course/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/getting-started/training-course/" title="Training Course">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/getting-started/with-flink-cdc/index.html b/content/zh/getting-started/with-flink-cdc/index.html
index ee80761..7f4c586 100644
--- a/content/zh/getting-started/with-flink-cdc/index.html
+++ b/content/zh/getting-started/with-flink-cdc/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/getting-started/with-flink-cdc/" title="With Flink CDC">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/getting-started/with-flink-kubernetes-operator/index.html b/content/zh/getting-started/with-flink-kubernetes-operator/index.html
index 47aa050..d0d7c5d 100644
--- a/content/zh/getting-started/with-flink-kubernetes-operator/index.html
+++ b/content/zh/getting-started/with-flink-kubernetes-operator/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/getting-started/with-flink-kubernetes-operator/" title="With Flink Kubernetes Operator">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/getting-started/with-flink-ml/index.html b/content/zh/getting-started/with-flink-ml/index.html
index 9fac8a8..dd78f7e 100644
--- a/content/zh/getting-started/with-flink-ml/index.html
+++ b/content/zh/getting-started/with-flink-ml/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/getting-started/with-flink-ml/" title="With Flink ML">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/getting-started/with-flink-stateful-functions/index.html b/content/zh/getting-started/with-flink-stateful-functions/index.html
index 2cc1d13..a5c334a 100644
--- a/content/zh/getting-started/with-flink-stateful-functions/index.html
+++ b/content/zh/getting-started/with-flink-stateful-functions/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/getting-started/with-flink-stateful-functions/" title="With Flink Stateful Functions">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/getting-started/with-flink/index.html b/content/zh/getting-started/with-flink/index.html
index e3289ba..1342763 100644
--- a/content/zh/getting-started/with-flink/index.html
+++ b/content/zh/getting-started/with-flink/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/getting-started/with-flink/" title="With Flink">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/how-to-contribute/code-style-and-quality-common/index.html b/content/zh/how-to-contribute/code-style-and-quality-common/index.html
index 573fbad..9e9354f 100644
--- a/content/zh/how-to-contribute/code-style-and-quality-common/index.html
+++ b/content/zh/how-to-contribute/code-style-and-quality-common/index.html
@@ -30,7 +30,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/how-to-contribute/code-style-and-quality-common/" title="Code Style and Quality Guide — Common Rules">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -469,33 +469,33 @@
   Code Style and Quality Guide — Common Rules
   <a class="anchor" href="#code-style-and-quality-guide--common-rules">#</a>
 </h1>
-<h4 id="序言hahahugoshortcode55s0hbhb">
+<h4 id="序言hahahugoshortcode69s0hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-preamble/">序言</a>
-  <a class="anchor" href="#%e5%ba%8f%e8%a8%80hahahugoshortcode55s0hbhb">#</a>
+  <a class="anchor" href="#%e5%ba%8f%e8%a8%80hahahugoshortcode69s0hbhb">#</a>
 </h4>
-<h4 id="pull-requests--changeshahahugoshortcode55s1hbhb">
+<h4 id="pull-requests--changeshahahugoshortcode69s1hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-pull-requests/">Pull Requests &amp; Changes</a>
-  <a class="anchor" href="#pull-requests--changeshahahugoshortcode55s1hbhb">#</a>
+  <a class="anchor" href="#pull-requests--changeshahahugoshortcode69s1hbhb">#</a>
 </h4>
-<h4 id="常用编码指南hahahugoshortcode55s2hbhb">
+<h4 id="常用编码指南hahahugoshortcode69s2hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-common/">常用编码指南</a>
-  <a class="anchor" href="#%e5%b8%b8%e7%94%a8%e7%bc%96%e7%a0%81%e6%8c%87%e5%8d%97hahahugoshortcode55s2hbhb">#</a>
+  <a class="anchor" href="#%e5%b8%b8%e7%94%a8%e7%bc%96%e7%a0%81%e6%8c%87%e5%8d%97hahahugoshortcode69s2hbhb">#</a>
 </h4>
-<h4 id="java-语言指南hahahugoshortcode55s3hbhb">
+<h4 id="java-语言指南hahahugoshortcode69s3hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-java/">Java 语言指南</a>
-  <a class="anchor" href="#java-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode55s3hbhb">#</a>
+  <a class="anchor" href="#java-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode69s3hbhb">#</a>
 </h4>
-<h4 id="scala-语言指南hahahugoshortcode55s4hbhb">
+<h4 id="scala-语言指南hahahugoshortcode69s4hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-scala/">Scala 语言指南</a>
-  <a class="anchor" href="#scala-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode55s4hbhb">#</a>
+  <a class="anchor" href="#scala-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode69s4hbhb">#</a>
 </h4>
-<h4 id="组件指南hahahugoshortcode55s5hbhb">
+<h4 id="组件指南hahahugoshortcode69s5hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-components/">组件指南</a>
-  <a class="anchor" href="#%e7%bb%84%e4%bb%b6%e6%8c%87%e5%8d%97hahahugoshortcode55s5hbhb">#</a>
+  <a class="anchor" href="#%e7%bb%84%e4%bb%b6%e6%8c%87%e5%8d%97hahahugoshortcode69s5hbhb">#</a>
 </h4>
-<h4 id="格式指南hahahugoshortcode55s6hbhb">
+<h4 id="格式指南hahahugoshortcode69s6hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-formatting/">格式指南</a>
-  <a class="anchor" href="#%e6%a0%bc%e5%bc%8f%e6%8c%87%e5%8d%97hahahugoshortcode55s6hbhb">#</a>
+  <a class="anchor" href="#%e6%a0%bc%e5%bc%8f%e6%8c%87%e5%8d%97hahahugoshortcode69s6hbhb">#</a>
 </h4>
 <hr>
 <h2 id="1-copyright">
diff --git a/content/zh/how-to-contribute/code-style-and-quality-components/index.html b/content/zh/how-to-contribute/code-style-and-quality-components/index.html
index 38e0597..61934c8 100644
--- a/content/zh/how-to-contribute/code-style-and-quality-components/index.html
+++ b/content/zh/how-to-contribute/code-style-and-quality-components/index.html
@@ -36,7 +36,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/how-to-contribute/code-style-and-quality-components/" title="Code Style and Quality Guide — Components Guide">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -290,7 +290,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,33 +475,33 @@
   Apache Flink 代码样式和质量指南 — 组件
   <a class="anchor" href="#apache-flink-%e4%bb%a3%e7%a0%81%e6%a0%b7%e5%bc%8f%e5%92%8c%e8%b4%a8%e9%87%8f%e6%8c%87%e5%8d%97--%e7%bb%84%e4%bb%b6">#</a>
 </h1>
-<h4 id="序言hahahugoshortcode64s0hbhb">
+<h4 id="序言hahahugoshortcode56s0hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-preamble/">序言</a>
-  <a class="anchor" href="#%e5%ba%8f%e8%a8%80hahahugoshortcode64s0hbhb">#</a>
+  <a class="anchor" href="#%e5%ba%8f%e8%a8%80hahahugoshortcode56s0hbhb">#</a>
 </h4>
-<h4 id="pull-requests--changeshahahugoshortcode64s1hbhb">
+<h4 id="pull-requests--changeshahahugoshortcode56s1hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-pull-requests/">Pull Requests &amp; Changes</a>
-  <a class="anchor" href="#pull-requests--changeshahahugoshortcode64s1hbhb">#</a>
+  <a class="anchor" href="#pull-requests--changeshahahugoshortcode56s1hbhb">#</a>
 </h4>
-<h4 id="常用编码指南hahahugoshortcode64s2hbhb">
+<h4 id="常用编码指南hahahugoshortcode56s2hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-common/">常用编码指南</a>
-  <a class="anchor" href="#%e5%b8%b8%e7%94%a8%e7%bc%96%e7%a0%81%e6%8c%87%e5%8d%97hahahugoshortcode64s2hbhb">#</a>
+  <a class="anchor" href="#%e5%b8%b8%e7%94%a8%e7%bc%96%e7%a0%81%e6%8c%87%e5%8d%97hahahugoshortcode56s2hbhb">#</a>
 </h4>
-<h4 id="java-语言指南hahahugoshortcode64s3hbhb">
+<h4 id="java-语言指南hahahugoshortcode56s3hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-java/">Java 语言指南</a>
-  <a class="anchor" href="#java-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode64s3hbhb">#</a>
+  <a class="anchor" href="#java-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode56s3hbhb">#</a>
 </h4>
-<h4 id="scala-语言指南hahahugoshortcode64s4hbhb">
+<h4 id="scala-语言指南hahahugoshortcode56s4hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-scala/">Scala 语言指南</a>
-  <a class="anchor" href="#scala-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode64s4hbhb">#</a>
+  <a class="anchor" href="#scala-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode56s4hbhb">#</a>
 </h4>
-<h4 id="组件指南hahahugoshortcode64s5hbhb">
+<h4 id="组件指南hahahugoshortcode56s5hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-components/">组件指南</a>
-  <a class="anchor" href="#%e7%bb%84%e4%bb%b6%e6%8c%87%e5%8d%97hahahugoshortcode64s5hbhb">#</a>
+  <a class="anchor" href="#%e7%bb%84%e4%bb%b6%e6%8c%87%e5%8d%97hahahugoshortcode56s5hbhb">#</a>
 </h4>
-<h4 id="格式指南hahahugoshortcode64s6hbhb">
+<h4 id="格式指南hahahugoshortcode56s6hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-formatting/">格式指南</a>
-  <a class="anchor" href="#%e6%a0%bc%e5%bc%8f%e6%8c%87%e5%8d%97hahahugoshortcode64s6hbhb">#</a>
+  <a class="anchor" href="#%e6%a0%bc%e5%bc%8f%e6%8c%87%e5%8d%97hahahugoshortcode56s6hbhb">#</a>
 </h4>
 <h2 id="组件特定指南">
   组件特定指南
diff --git a/content/zh/how-to-contribute/code-style-and-quality-formatting/index.html b/content/zh/how-to-contribute/code-style-and-quality-formatting/index.html
index b50e4c1..b74a9fb 100644
--- a/content/zh/how-to-contribute/code-style-and-quality-formatting/index.html
+++ b/content/zh/how-to-contribute/code-style-and-quality-formatting/index.html
@@ -30,7 +30,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/how-to-contribute/code-style-and-quality-formatting/" title="Code Style and Quality Guide — Formatting Guide">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -469,33 +469,33 @@
   Code Style and Quality Guide — Formatting Guide
   <a class="anchor" href="#code-style-and-quality-guide--formatting-guide">#</a>
 </h1>
-<h4 id="序言hahahugoshortcode57s0hbhb">
+<h4 id="序言hahahugoshortcode76s0hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-preamble/">序言</a>
-  <a class="anchor" href="#%e5%ba%8f%e8%a8%80hahahugoshortcode57s0hbhb">#</a>
+  <a class="anchor" href="#%e5%ba%8f%e8%a8%80hahahugoshortcode76s0hbhb">#</a>
 </h4>
-<h4 id="pull-requests--changeshahahugoshortcode57s1hbhb">
+<h4 id="pull-requests--changeshahahugoshortcode76s1hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-pull-requests/">Pull Requests &amp; Changes</a>
-  <a class="anchor" href="#pull-requests--changeshahahugoshortcode57s1hbhb">#</a>
+  <a class="anchor" href="#pull-requests--changeshahahugoshortcode76s1hbhb">#</a>
 </h4>
-<h4 id="常用编码指南hahahugoshortcode57s2hbhb">
+<h4 id="常用编码指南hahahugoshortcode76s2hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-common/">常用编码指南</a>
-  <a class="anchor" href="#%e5%b8%b8%e7%94%a8%e7%bc%96%e7%a0%81%e6%8c%87%e5%8d%97hahahugoshortcode57s2hbhb">#</a>
+  <a class="anchor" href="#%e5%b8%b8%e7%94%a8%e7%bc%96%e7%a0%81%e6%8c%87%e5%8d%97hahahugoshortcode76s2hbhb">#</a>
 </h4>
-<h4 id="java-语言指南hahahugoshortcode57s3hbhb">
+<h4 id="java-语言指南hahahugoshortcode76s3hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-java/">Java 语言指南</a>
-  <a class="anchor" href="#java-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode57s3hbhb">#</a>
+  <a class="anchor" href="#java-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode76s3hbhb">#</a>
 </h4>
-<h4 id="scala-语言指南hahahugoshortcode57s4hbhb">
+<h4 id="scala-语言指南hahahugoshortcode76s4hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-scala/">Scala 语言指南</a>
-  <a class="anchor" href="#scala-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode57s4hbhb">#</a>
+  <a class="anchor" href="#scala-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode76s4hbhb">#</a>
 </h4>
-<h4 id="组件指南hahahugoshortcode57s5hbhb">
+<h4 id="组件指南hahahugoshortcode76s5hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-components/">组件指南</a>
-  <a class="anchor" href="#%e7%bb%84%e4%bb%b6%e6%8c%87%e5%8d%97hahahugoshortcode57s5hbhb">#</a>
+  <a class="anchor" href="#%e7%bb%84%e4%bb%b6%e6%8c%87%e5%8d%97hahahugoshortcode76s5hbhb">#</a>
 </h4>
-<h4 id="格式指南hahahugoshortcode57s6hbhb">
+<h4 id="格式指南hahahugoshortcode76s6hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-formatting/">格式指南</a>
-  <a class="anchor" href="#%e6%a0%bc%e5%bc%8f%e6%8c%87%e5%8d%97hahahugoshortcode57s6hbhb">#</a>
+  <a class="anchor" href="#%e6%a0%bc%e5%bc%8f%e6%8c%87%e5%8d%97hahahugoshortcode76s6hbhb">#</a>
 </h4>
 <h2 id="java-code-formatting-style">
   Java Code Formatting Style
diff --git a/content/zh/how-to-contribute/code-style-and-quality-java/index.html b/content/zh/how-to-contribute/code-style-and-quality-java/index.html
index a647df6..5c6cf3a 100644
--- a/content/zh/how-to-contribute/code-style-and-quality-java/index.html
+++ b/content/zh/how-to-contribute/code-style-and-quality-java/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/how-to-contribute/code-style-and-quality-java/" title="Code Style and Quality Guide — Java">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -467,33 +467,33 @@
   Code Style and Quality Guide — Java
   <a class="anchor" href="#code-style-and-quality-guide--java">#</a>
 </h1>
-<h4 id="序言hahahugoshortcode58s0hbhb">
+<h4 id="序言hahahugoshortcode73s0hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-preamble/">序言</a>
-  <a class="anchor" href="#%e5%ba%8f%e8%a8%80hahahugoshortcode58s0hbhb">#</a>
+  <a class="anchor" href="#%e5%ba%8f%e8%a8%80hahahugoshortcode73s0hbhb">#</a>
 </h4>
-<h4 id="pull-requests--changeshahahugoshortcode58s1hbhb">
+<h4 id="pull-requests--changeshahahugoshortcode73s1hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-pull-requests/">Pull Requests &amp; Changes</a>
-  <a class="anchor" href="#pull-requests--changeshahahugoshortcode58s1hbhb">#</a>
+  <a class="anchor" href="#pull-requests--changeshahahugoshortcode73s1hbhb">#</a>
 </h4>
-<h4 id="常用编码指南hahahugoshortcode58s2hbhb">
+<h4 id="常用编码指南hahahugoshortcode73s2hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-common/">常用编码指南</a>
-  <a class="anchor" href="#%e5%b8%b8%e7%94%a8%e7%bc%96%e7%a0%81%e6%8c%87%e5%8d%97hahahugoshortcode58s2hbhb">#</a>
+  <a class="anchor" href="#%e5%b8%b8%e7%94%a8%e7%bc%96%e7%a0%81%e6%8c%87%e5%8d%97hahahugoshortcode73s2hbhb">#</a>
 </h4>
-<h4 id="java-语言指南hahahugoshortcode58s3hbhb">
+<h4 id="java-语言指南hahahugoshortcode73s3hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-java/">Java 语言指南</a>
-  <a class="anchor" href="#java-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode58s3hbhb">#</a>
+  <a class="anchor" href="#java-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode73s3hbhb">#</a>
 </h4>
-<h4 id="scala-语言指南hahahugoshortcode58s4hbhb">
+<h4 id="scala-语言指南hahahugoshortcode73s4hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-scala/">Scala 语言指南</a>
-  <a class="anchor" href="#scala-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode58s4hbhb">#</a>
+  <a class="anchor" href="#scala-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode73s4hbhb">#</a>
 </h4>
-<h4 id="组件指南hahahugoshortcode58s5hbhb">
+<h4 id="组件指南hahahugoshortcode73s5hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-components/">组件指南</a>
-  <a class="anchor" href="#%e7%bb%84%e4%bb%b6%e6%8c%87%e5%8d%97hahahugoshortcode58s5hbhb">#</a>
+  <a class="anchor" href="#%e7%bb%84%e4%bb%b6%e6%8c%87%e5%8d%97hahahugoshortcode73s5hbhb">#</a>
 </h4>
-<h4 id="格式指南hahahugoshortcode58s6hbhb">
+<h4 id="格式指南hahahugoshortcode73s6hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-formatting/">格式指南</a>
-  <a class="anchor" href="#%e6%a0%bc%e5%bc%8f%e6%8c%87%e5%8d%97hahahugoshortcode58s6hbhb">#</a>
+  <a class="anchor" href="#%e6%a0%bc%e5%bc%8f%e6%8c%87%e5%8d%97hahahugoshortcode73s6hbhb">#</a>
 </h4>
 <h2 id="java-language-features-and-libraries">
   Java Language Features and Libraries
diff --git a/content/zh/how-to-contribute/code-style-and-quality-preamble/index.html b/content/zh/how-to-contribute/code-style-and-quality-preamble/index.html
index cfde4c1..fc7f18e 100644
--- a/content/zh/how-to-contribute/code-style-and-quality-preamble/index.html
+++ b/content/zh/how-to-contribute/code-style-and-quality-preamble/index.html
@@ -40,7 +40,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/how-to-contribute/code-style-and-quality-preamble/" title="Code Style and Quality Guide">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -294,7 +294,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -479,33 +479,33 @@
   Apache Flink Code Style and Quality Guide
   <a class="anchor" href="#apache-flink-code-style-and-quality-guide">#</a>
 </h1>
-<h4 id="序言hahahugoshortcode56s0hbhb">
+<h4 id="序言hahahugoshortcode55s0hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-preamble/">序言</a>
-  <a class="anchor" href="#%e5%ba%8f%e8%a8%80hahahugoshortcode56s0hbhb">#</a>
+  <a class="anchor" href="#%e5%ba%8f%e8%a8%80hahahugoshortcode55s0hbhb">#</a>
 </h4>
-<h4 id="pull-requests--changeshahahugoshortcode56s1hbhb">
+<h4 id="pull-requests--changeshahahugoshortcode55s1hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-pull-requests/">Pull Requests &amp; Changes</a>
-  <a class="anchor" href="#pull-requests--changeshahahugoshortcode56s1hbhb">#</a>
+  <a class="anchor" href="#pull-requests--changeshahahugoshortcode55s1hbhb">#</a>
 </h4>
-<h4 id="常用编码指南hahahugoshortcode56s2hbhb">
+<h4 id="常用编码指南hahahugoshortcode55s2hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-common/">常用编码指南</a>
-  <a class="anchor" href="#%e5%b8%b8%e7%94%a8%e7%bc%96%e7%a0%81%e6%8c%87%e5%8d%97hahahugoshortcode56s2hbhb">#</a>
+  <a class="anchor" href="#%e5%b8%b8%e7%94%a8%e7%bc%96%e7%a0%81%e6%8c%87%e5%8d%97hahahugoshortcode55s2hbhb">#</a>
 </h4>
-<h4 id="java-语言指南hahahugoshortcode56s3hbhb">
+<h4 id="java-语言指南hahahugoshortcode55s3hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-java/">Java 语言指南</a>
-  <a class="anchor" href="#java-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode56s3hbhb">#</a>
+  <a class="anchor" href="#java-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode55s3hbhb">#</a>
 </h4>
-<h4 id="scala-语言指南hahahugoshortcode56s4hbhb">
+<h4 id="scala-语言指南hahahugoshortcode55s4hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-scala/">Scala 语言指南</a>
-  <a class="anchor" href="#scala-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode56s4hbhb">#</a>
+  <a class="anchor" href="#scala-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode55s4hbhb">#</a>
 </h4>
-<h4 id="组件指南hahahugoshortcode56s5hbhb">
+<h4 id="组件指南hahahugoshortcode55s5hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-components/">组件指南</a>
-  <a class="anchor" href="#%e7%bb%84%e4%bb%b6%e6%8c%87%e5%8d%97hahahugoshortcode56s5hbhb">#</a>
+  <a class="anchor" href="#%e7%bb%84%e4%bb%b6%e6%8c%87%e5%8d%97hahahugoshortcode55s5hbhb">#</a>
 </h4>
-<h4 id="格式指南hahahugoshortcode56s6hbhb">
+<h4 id="格式指南hahahugoshortcode55s6hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-formatting/">格式指南</a>
-  <a class="anchor" href="#%e6%a0%bc%e5%bc%8f%e6%8c%87%e5%8d%97hahahugoshortcode56s6hbhb">#</a>
+  <a class="anchor" href="#%e6%a0%bc%e5%bc%8f%e6%8c%87%e5%8d%97hahahugoshortcode55s6hbhb">#</a>
 </h4>
 <hr>
 <p>这是对我们想要维护的代码和质量标准的一种尝试。</p>
diff --git a/content/zh/how-to-contribute/code-style-and-quality-pull-requests/index.html b/content/zh/how-to-contribute/code-style-and-quality-pull-requests/index.html
index 40bfcef..70ac176 100644
--- a/content/zh/how-to-contribute/code-style-and-quality-pull-requests/index.html
+++ b/content/zh/how-to-contribute/code-style-and-quality-pull-requests/index.html
@@ -30,7 +30,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/how-to-contribute/code-style-and-quality-pull-requests/" title="Code Style and Quality Guide — Pull Requests & Changes">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -469,33 +469,33 @@
   Code Style and Quality Guide — Pull Requests &amp; Changes
   <a class="anchor" href="#code-style-and-quality-guide--pull-requests--changes">#</a>
 </h1>
-<h4 id="序言hahahugoshortcode67s0hbhb">
+<h4 id="序言hahahugoshortcode59s0hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-preamble/">序言</a>
-  <a class="anchor" href="#%e5%ba%8f%e8%a8%80hahahugoshortcode67s0hbhb">#</a>
+  <a class="anchor" href="#%e5%ba%8f%e8%a8%80hahahugoshortcode59s0hbhb">#</a>
 </h4>
-<h4 id="pull-requests--changeshahahugoshortcode67s1hbhb">
+<h4 id="pull-requests--changeshahahugoshortcode59s1hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-pull-requests/">Pull Requests &amp; Changes</a>
-  <a class="anchor" href="#pull-requests--changeshahahugoshortcode67s1hbhb">#</a>
+  <a class="anchor" href="#pull-requests--changeshahahugoshortcode59s1hbhb">#</a>
 </h4>
-<h4 id="常用编码指南hahahugoshortcode67s2hbhb">
+<h4 id="常用编码指南hahahugoshortcode59s2hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-common/">常用编码指南</a>
-  <a class="anchor" href="#%e5%b8%b8%e7%94%a8%e7%bc%96%e7%a0%81%e6%8c%87%e5%8d%97hahahugoshortcode67s2hbhb">#</a>
+  <a class="anchor" href="#%e5%b8%b8%e7%94%a8%e7%bc%96%e7%a0%81%e6%8c%87%e5%8d%97hahahugoshortcode59s2hbhb">#</a>
 </h4>
-<h4 id="java-语言指南hahahugoshortcode67s3hbhb">
+<h4 id="java-语言指南hahahugoshortcode59s3hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-java/">Java 语言指南</a>
-  <a class="anchor" href="#java-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode67s3hbhb">#</a>
+  <a class="anchor" href="#java-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode59s3hbhb">#</a>
 </h4>
-<h4 id="scala-语言指南hahahugoshortcode67s4hbhb">
+<h4 id="scala-语言指南hahahugoshortcode59s4hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-scala/">Scala 语言指南</a>
-  <a class="anchor" href="#scala-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode67s4hbhb">#</a>
+  <a class="anchor" href="#scala-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode59s4hbhb">#</a>
 </h4>
-<h4 id="组件指南hahahugoshortcode67s5hbhb">
+<h4 id="组件指南hahahugoshortcode59s5hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-components/">组件指南</a>
-  <a class="anchor" href="#%e7%bb%84%e4%bb%b6%e6%8c%87%e5%8d%97hahahugoshortcode67s5hbhb">#</a>
+  <a class="anchor" href="#%e7%bb%84%e4%bb%b6%e6%8c%87%e5%8d%97hahahugoshortcode59s5hbhb">#</a>
 </h4>
-<h4 id="格式指南hahahugoshortcode67s6hbhb">
+<h4 id="格式指南hahahugoshortcode59s6hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-formatting/">格式指南</a>
-  <a class="anchor" href="#%e6%a0%bc%e5%bc%8f%e6%8c%87%e5%8d%97hahahugoshortcode67s6hbhb">#</a>
+  <a class="anchor" href="#%e6%a0%bc%e5%bc%8f%e6%8c%87%e5%8d%97hahahugoshortcode59s6hbhb">#</a>
 </h4>
 <hr>
 <p><strong>Rationale:</strong> We ask contributors to put in a little bit of extra effort to bring pull requests into a state that they can be more easily and more thoroughly reviewed. This helps the community in many ways:</p>
diff --git a/content/zh/how-to-contribute/code-style-and-quality-scala/index.html b/content/zh/how-to-contribute/code-style-and-quality-scala/index.html
index 37b7a46..120eff4 100644
--- a/content/zh/how-to-contribute/code-style-and-quality-scala/index.html
+++ b/content/zh/how-to-contribute/code-style-and-quality-scala/index.html
@@ -36,7 +36,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/how-to-contribute/code-style-and-quality-scala/" title="Code Style and Quality Guide — Scala">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -290,7 +290,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -475,33 +475,33 @@
   Code Style and Quality Guide — Scala
   <a class="anchor" href="#code-style-and-quality-guide--scala">#</a>
 </h1>
-<h4 id="序言hahahugoshortcode69s0hbhb">
+<h4 id="序言hahahugoshortcode61s0hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-preamble/">序言</a>
-  <a class="anchor" href="#%e5%ba%8f%e8%a8%80hahahugoshortcode69s0hbhb">#</a>
+  <a class="anchor" href="#%e5%ba%8f%e8%a8%80hahahugoshortcode61s0hbhb">#</a>
 </h4>
-<h4 id="pull-requests--changeshahahugoshortcode69s1hbhb">
+<h4 id="pull-requests--changeshahahugoshortcode61s1hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-pull-requests/">Pull Requests &amp; Changes</a>
-  <a class="anchor" href="#pull-requests--changeshahahugoshortcode69s1hbhb">#</a>
+  <a class="anchor" href="#pull-requests--changeshahahugoshortcode61s1hbhb">#</a>
 </h4>
-<h4 id="常用编码指南hahahugoshortcode69s2hbhb">
+<h4 id="常用编码指南hahahugoshortcode61s2hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-common/">常用编码指南</a>
-  <a class="anchor" href="#%e5%b8%b8%e7%94%a8%e7%bc%96%e7%a0%81%e6%8c%87%e5%8d%97hahahugoshortcode69s2hbhb">#</a>
+  <a class="anchor" href="#%e5%b8%b8%e7%94%a8%e7%bc%96%e7%a0%81%e6%8c%87%e5%8d%97hahahugoshortcode61s2hbhb">#</a>
 </h4>
-<h4 id="java-语言指南hahahugoshortcode69s3hbhb">
+<h4 id="java-语言指南hahahugoshortcode61s3hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-java/">Java 语言指南</a>
-  <a class="anchor" href="#java-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode69s3hbhb">#</a>
+  <a class="anchor" href="#java-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode61s3hbhb">#</a>
 </h4>
-<h4 id="scala-语言指南hahahugoshortcode69s4hbhb">
+<h4 id="scala-语言指南hahahugoshortcode61s4hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-scala/">Scala 语言指南</a>
-  <a class="anchor" href="#scala-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode69s4hbhb">#</a>
+  <a class="anchor" href="#scala-%e8%af%ad%e8%a8%80%e6%8c%87%e5%8d%97hahahugoshortcode61s4hbhb">#</a>
 </h4>
-<h4 id="组件指南hahahugoshortcode69s5hbhb">
+<h4 id="组件指南hahahugoshortcode61s5hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-components/">组件指南</a>
-  <a class="anchor" href="#%e7%bb%84%e4%bb%b6%e6%8c%87%e5%8d%97hahahugoshortcode69s5hbhb">#</a>
+  <a class="anchor" href="#%e7%bb%84%e4%bb%b6%e6%8c%87%e5%8d%97hahahugoshortcode61s5hbhb">#</a>
 </h4>
-<h4 id="格式指南hahahugoshortcode69s6hbhb">
+<h4 id="格式指南hahahugoshortcode61s6hbhb">
   <a href="/zh/how-to-contribute/code-style-and-quality-formatting/">格式指南</a>
-  <a class="anchor" href="#%e6%a0%bc%e5%bc%8f%e6%8c%87%e5%8d%97hahahugoshortcode69s6hbhb">#</a>
+  <a class="anchor" href="#%e6%a0%bc%e5%bc%8f%e6%8c%87%e5%8d%97hahahugoshortcode61s6hbhb">#</a>
 </h4>
 <h2 id="scala-语言特性">
   Scala 语言特性
diff --git a/content/zh/how-to-contribute/contribute-code/index.html b/content/zh/how-to-contribute/contribute-code/index.html
index 2748200..6dbaa7a 100644
--- a/content/zh/how-to-contribute/contribute-code/index.html
+++ b/content/zh/how-to-contribute/contribute-code/index.html
@@ -36,7 +36,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/how-to-contribute/contribute-code/" title="Contribute Code">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -290,7 +290,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/how-to-contribute/contribute-documentation/index.html b/content/zh/how-to-contribute/contribute-documentation/index.html
index 8b17fff..3755a94 100644
--- a/content/zh/how-to-contribute/contribute-documentation/index.html
+++ b/content/zh/how-to-contribute/contribute-documentation/index.html
@@ -46,7 +46,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/how-to-contribute/contribute-documentation/" title="Contribute Documentation">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -300,7 +300,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/how-to-contribute/documentation-style-guide/index.html b/content/zh/how-to-contribute/documentation-style-guide/index.html
index 7e58748..0542e03 100644
--- a/content/zh/how-to-contribute/documentation-style-guide/index.html
+++ b/content/zh/how-to-contribute/documentation-style-guide/index.html
@@ -38,7 +38,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/how-to-contribute/documentation-style-guide/" title="Documentation Style Guide">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -292,7 +292,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/how-to-contribute/getting-help/index.html b/content/zh/how-to-contribute/getting-help/index.html
index c2b255b..f7e7aa2 100644
--- a/content/zh/how-to-contribute/getting-help/index.html
+++ b/content/zh/how-to-contribute/getting-help/index.html
@@ -42,7 +42,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/how-to-contribute/getting-help/" title="Getting Help">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -296,7 +296,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/how-to-contribute/improve-website/index.html b/content/zh/how-to-contribute/improve-website/index.html
index be17f7b..4e9aea2 100644
--- a/content/zh/how-to-contribute/improve-website/index.html
+++ b/content/zh/how-to-contribute/improve-website/index.html
@@ -42,7 +42,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/how-to-contribute/improve-website/" title="Contribute to the Website">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -296,7 +296,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/how-to-contribute/index.html b/content/zh/how-to-contribute/index.html
index e71e2ac..8e20003 100644
--- a/content/zh/how-to-contribute/index.html
+++ b/content/zh/how-to-contribute/index.html
@@ -26,7 +26,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/how-to-contribute/" title="How to Contribute">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/zh/how-to-contribute/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -281,7 +281,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/how-to-contribute/overview/index.html b/content/zh/how-to-contribute/overview/index.html
index 2dc015d..00f09eb 100644
--- a/content/zh/how-to-contribute/overview/index.html
+++ b/content/zh/how-to-contribute/overview/index.html
@@ -32,7 +32,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/how-to-contribute/overview/" title="Overview">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -286,7 +286,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/how-to-contribute/reviewing-prs/index.html b/content/zh/how-to-contribute/reviewing-prs/index.html
index ab58fcd..fc64876 100644
--- a/content/zh/how-to-contribute/reviewing-prs/index.html
+++ b/content/zh/how-to-contribute/reviewing-prs/index.html
@@ -44,7 +44,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/how-to-contribute/reviewing-prs/" title="Review Pull Requests">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -298,7 +298,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/index.html b/content/zh/index.html
index aca60f4..afa9a32 100644
--- a/content/zh/index.html
+++ b/content/zh/index.html
@@ -15,7 +15,7 @@
 
   <meta charset="UTF-8">
 <meta name="viewport" content="width=device-width, initial-scale=1.0">
-<meta name="description" content="最新博客列表 Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades 2025年7月31日 - Ron Liu. The Apache Flink PMC is proud to announce the release of Apache Flink 2.1.0. This marks a significant milestone in the evolution of the real-time data processing engine into a unified Data &#43; AI … Continue reading Apache Flink 1.19.3 Release Announcement 2025年7月10日 - Ferenc Csaky. The Apache Flink Community is pleased to announce the third bug fix release of the Flink 1.">
+<meta name="description" content="最新博客列表 Apache Flink CDC 3.5.0 Release Announcement 2025年9月26日 - Yanquan Lv. The Apache Flink Community is excited to announce the release of Flink CDC 3.5.0! This release introduces new pipeline connectors for Apache Fluss and PostgreSQL, and improves usability in … Continue reading Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data &#43; AI with Comprehensive Upgrades 2025年7月31日 - Ron Liu. The Apache Flink PMC is proud to announce the release of Apache Flink 2.">
 <meta name="theme-color" content="#FFFFFF"><meta property="og:title" content="Apache Flink Documentation" />
 <meta property="og:description" content="" />
 <meta property="og:type" content="website" />
@@ -26,7 +26,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/" title="Apache Flink® — Stateful Computations over Data Streams">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/zh/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -281,7 +281,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
@@ -621,6 +621,23 @@
     
         
           <div class="card">
+            <div class="heading">Apache Flink CDC 3.5.0 Release Announcement</div>
+            <div class="body">
+                <p>
+                  2025年9月26日 - 
+                  
+                    Yanquan Lv.
+                  
+                </p>
+                <p class="truncate">
+                  The Apache Flink Community is excited to announce the release of Flink CDC 3.5.0!
+This release introduces new pipeline connectors for Apache Fluss and PostgreSQL, and improves usability in …
+                </p>
+                <a href="/2025/09/26/apache-flink-cdc-3.5.0-release-announcement/">Continue reading</a>
+            </div>
+          </div>
+        
+          <div class="card">
             <div class="heading">Apache Flink 2.1.0: Ushers in a New Era of Unified Real-Time Data + AI with Comprehensive Upgrades</div>
             <div class="body">
                 <p>
@@ -653,23 +670,6 @@
             </div>
           </div>
         
-          <div class="card">
-            <div class="heading">Apache Flink 1.20.2 Release Announcement</div>
-            <div class="body">
-                <p>
-                  2025年7月10日 - 
-                  
-                    Ferenc Csaky.
-                  
-                </p>
-                <p class="truncate">
-                  The Apache Flink Community is pleased to announce the second bug fix release of the Flink 1.20 series.
-This release includes 25 bug fixes, vulnerability fixes, and minor improvements for Flink 1.20. …
-                </p>
-                <a href="/2025/07/10/apache-flink-1.20.2-release-announcement/">Continue reading</a>
-            </div>
-          </div>
-        
     
         
     
diff --git a/content/zh/material/index.html b/content/zh/material/index.html
index 5964fb2..531d158 100644
--- a/content/zh/material/index.html
+++ b/content/zh/material/index.html
@@ -34,7 +34,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/material/" title="Material">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/tags/index.html b/content/zh/tags/index.html
index 61569ca..ccd2b7b 100644
--- a/content/zh/tags/index.html
+++ b/content/zh/tags/index.html
@@ -26,7 +26,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/tags/" title="Tags">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/zh/tags/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -281,7 +281,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/what-is-flink-ml/index.html b/content/zh/what-is-flink-ml/index.html
index 3ef1115..ef5a03c 100644
--- a/content/zh/what-is-flink-ml/index.html
+++ b/content/zh/what-is-flink-ml/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/what-is-flink-ml/" title="What is Flink ML?">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/what-is-flink-table-store/index.html b/content/zh/what-is-flink-table-store/index.html
index f006a9a..bf06a56 100644
--- a/content/zh/what-is-flink-table-store/index.html
+++ b/content/zh/what-is-flink-table-store/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/what-is-flink-table-store/" title="What is Paimon(incubating) (formerly Flink Table Store)?">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/what-is-flink/community/index.html b/content/zh/what-is-flink/community/index.html
index 7fadf71..08621d7 100644
--- a/content/zh/what-is-flink/community/index.html
+++ b/content/zh/what-is-flink/community/index.html
@@ -38,7 +38,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/what-is-flink/community/" title="Community & Project Info">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -292,7 +292,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/what-is-flink/flink-applications/index.html b/content/zh/what-is-flink/flink-applications/index.html
index 9a91541..76cbbed 100644
--- a/content/zh/what-is-flink/flink-applications/index.html
+++ b/content/zh/what-is-flink/flink-applications/index.html
@@ -40,7 +40,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/what-is-flink/flink-applications/" title="Applications">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -294,7 +294,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/what-is-flink/flink-architecture/index.html b/content/zh/what-is-flink/flink-architecture/index.html
index 56c5564..6c4e26f 100644
--- a/content/zh/what-is-flink/flink-architecture/index.html
+++ b/content/zh/what-is-flink/flink-architecture/index.html
@@ -54,7 +54,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/what-is-flink/flink-architecture/" title="Architecture">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -308,7 +308,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/what-is-flink/flink-operations/index.html b/content/zh/what-is-flink/flink-operations/index.html
index 7d2508b..8fb449e 100644
--- a/content/zh/what-is-flink/flink-operations/index.html
+++ b/content/zh/what-is-flink/flink-operations/index.html
@@ -40,7 +40,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/what-is-flink/flink-operations/" title="Operations">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -294,7 +294,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/what-is-flink/index.html b/content/zh/what-is-flink/index.html
index a555af0..fec3be1 100644
--- a/content/zh/what-is-flink/index.html
+++ b/content/zh/what-is-flink/index.html
@@ -26,7 +26,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/what-is-flink/" title="About">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <link rel="alternate" type="application/rss+xml" href="https://flink.apache.org/zh/what-is-flink/index.xml" title="Apache Flink" />
 <!--
 Made with Book Theme
@@ -281,7 +281,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/what-is-flink/powered-by/index.html b/content/zh/what-is-flink/powered-by/index.html
index ab5303a..b878102 100644
--- a/content/zh/what-is-flink/powered-by/index.html
+++ b/content/zh/what-is-flink/powered-by/index.html
@@ -34,7 +34,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/what-is-flink/powered-by/" title="Powered By">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -288,7 +288,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/what-is-flink/roadmap/index.html b/content/zh/what-is-flink/roadmap/index.html
index 023effe..1456a2a 100644
--- a/content/zh/what-is-flink/roadmap/index.html
+++ b/content/zh/what-is-flink/roadmap/index.html
@@ -38,7 +38,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/what-is-flink/roadmap/" title="Roadmap">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -292,7 +292,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/what-is-flink/security/index.html b/content/zh/what-is-flink/security/index.html
index 248752d..f023eab 100644
--- a/content/zh/what-is-flink/security/index.html
+++ b/content/zh/what-is-flink/security/index.html
@@ -30,7 +30,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/what-is-flink/security/" title="Security">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -284,7 +284,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/what-is-flink/special-thanks/index.html b/content/zh/what-is-flink/special-thanks/index.html
index 052438a..7de9bd3 100644
--- a/content/zh/what-is-flink/special-thanks/index.html
+++ b/content/zh/what-is-flink/special-thanks/index.html
@@ -40,7 +40,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/what-is-flink/special-thanks/" title="Special Thanks">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -294,7 +294,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/what-is-flink/use-cases/index.html b/content/zh/what-is-flink/use-cases/index.html
index e95f658..d383914 100644
--- a/content/zh/what-is-flink/use-cases/index.html
+++ b/content/zh/what-is-flink/use-cases/index.html
@@ -50,7 +50,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/what-is-flink/use-cases/" title="Use Cases">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -304,7 +304,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/what-is-stateful-functions/index.html b/content/zh/what-is-stateful-functions/index.html
index fe2bc84..a3c0d3a 100644
--- a/content/zh/what-is-stateful-functions/index.html
+++ b/content/zh/what-is-stateful-functions/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/what-is-stateful-functions/" title="What is Stateful Functions?">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>
   
 
diff --git a/content/zh/what-is-the-flink-kubernetes-operator/index.html b/content/zh/what-is-the-flink-kubernetes-operator/index.html
index a60879d..ed139c4 100644
--- a/content/zh/what-is-the-flink-kubernetes-operator/index.html
+++ b/content/zh/what-is-the-flink-kubernetes-operator/index.html
@@ -28,7 +28,7 @@
 <link rel="alternate" hreflang="en" href="https://flink.apache.org/what-is-the-flink-kubernetes-operator/" title="What is the Flink Kubernetes Operator?">
 
 <link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
-<script defer src="/zh.search.min.b0420eae9e5d903d7a92aefcbdff81a659f38bb9caad1e86f6372ef01f9b986e.js" integrity="sha256-sEIOrp5dkD16kq78vf&#43;Bplnzi7nKrR6G9jcu8B&#43;bmG4="></script>
+<script defer src="/zh.search.min.040510a516d3d51f9a3ce6760378c41458201109bf05fde33ee46e14a7e843d2.js" integrity="sha256-BAUQpRbT1R&#43;aPOZ2A3jEFFggEQm/Bf3jPuRuFKfoQ9I="></script>
 <!--
 Made with Book Theme
 https://github.com/alex-shpak/hugo-book
@@ -282,7 +282,7 @@
           <li>
             
   
-    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.4 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
+    <a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.5 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
     </a>