<!DOCTYPE html>
<html lang="en" dir="ltr">
<head>
<link rel="stylesheet" href="/bootstrap/css/bootstrap.min.css">
<script src="/bootstrap/js/bootstrap.bundle.min.js"></script>
<link rel="stylesheet" type="text/css" href="/font-awesome/css/font-awesome.min.css">
<script src="/js/anchor.min.js"></script>
<script src="/js/flink.js"></script>
<link rel="canonical" href="https://flink.apache.org/2020/02/11/apache-flink-1.10.0-release-announcement/">
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="description" content="The Apache Flink community is excited to hit the double digits and announce the release of Flink 1.10.0! As a result of the biggest community effort to date, with over 1.2k issues implemented and more than 200 contributors, this release introduces significant improvements to the overall performance and stability of Flink jobs, a preview of native Kubernetes integration and great advances in Python support (PyFlink).
Flink 1.10 also marks the completion of the Blink integration, hardening streaming SQL and bringing mature batch processing to Flink with production-ready Hive integration and TPC-DS coverage.">
<meta name="theme-color" content="#FFFFFF"><meta property="og:title" content="Apache Flink 1.10.0 Release Announcement" />
<meta property="og:description" content="The Apache Flink community is excited to hit the double digits and announce the release of Flink 1.10.0! As a result of the biggest community effort to date, with over 1.2k issues implemented and more than 200 contributors, this release introduces significant improvements to the overall performance and stability of Flink jobs, a preview of native Kubernetes integration and great advances in Python support (PyFlink).
Flink 1.10 also marks the completion of the Blink integration, hardening streaming SQL and bringing mature batch processing to Flink with production-ready Hive integration and TPC-DS coverage." />
<meta property="og:type" content="article" />
<meta property="og:url" content="https://flink.apache.org/2020/02/11/apache-flink-1.10.0-release-announcement/" /><meta property="article:section" content="posts" />
<meta property="article:published_time" content="2020-02-11T02:30:00+00:00" />
<meta property="article:modified_time" content="2020-02-11T02:30:00+00:00" />
<title>Apache Flink 1.10.0 Release Announcement | Apache Flink</title>
<link rel="manifest" href="/manifest.json">
<link rel="icon" href="/favicon.png" type="image/x-icon">
<link rel="stylesheet" href="/book.min.22eceb4d17baa9cdc0f57345edd6f215a40474022dfee39b63befb5fb3c596b5.css" integrity="sha256-IuzrTRe6qc3A9XNF7dbyFaQEdAIt/uObY777X7PFlrU=">
<script defer src="/en.search.min.2698f0d1b683dae4d6cb071668b310a55ebcf1c48d11410a015a51d90105b53e.js" integrity="sha256-Jpjw0baD2uTWywcWaLMQpV688cSNEUEKAVpR2QEFtT4="></script>
<!--
Made with Book Theme
https://github.com/alex-shpak/hugo-book
-->
<meta name="generator" content="Hugo 0.124.1">
<script>
var _paq = window._paq = window._paq || [];
_paq.push(['disableCookies']);
_paq.push(["setDomains", ["*.flink.apache.org","*.nightlies.apache.org/flink"]]);
_paq.push(['trackPageView']);
_paq.push(['enableLinkTracking']);
(function() {
var u="//analytics.apache.org/";
_paq.push(['setTrackerUrl', u+'matomo.php']);
_paq.push(['setSiteId', '1']);
var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
g.async=true; g.src=u+'matomo.js'; s.parentNode.insertBefore(g,s);
})();
</script>
</head>
<body dir="ltr">
<header>
<nav class="navbar navbar-expand-xl">
<div class="container-fluid">
<a class="navbar-brand" href="/">
<img src="/img/logo/png/100/flink_squirrel_100_color.png" alt="Apache Flink" height="47" width="47" class="d-inline-block align-text-middle">
<span>Apache Flink</span>
</a>
<button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarSupportedContent" aria-controls="navbarSupportedContent" aria-expanded="false" aria-label="Toggle navigation">
<i class="fa fa-bars navbar-toggler-icon"></i>
</button>
<div class="collapse navbar-collapse" id="navbarSupportedContent">
<ul class="navbar-nav">
<li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="#" role="button" data-bs-toggle="dropdown" aria-expanded="false">About</a>
<ul class="dropdown-menu">
<li>
<a class="dropdown-item" href="/what-is-flink/flink-architecture/">Architecture</a>
</li>
<li>
<a class="dropdown-item" href="/what-is-flink/flink-applications/">Applications</a>
</li>
<li>
<a class="dropdown-item" href="/what-is-flink/flink-operations/">Operations</a>
</li>
<li>
<a class="dropdown-item" href="/what-is-flink/use-cases/">Use Cases</a>
</li>
<li>
<a class="dropdown-item" href="/what-is-flink/powered-by/">Powered By</a>
</li>
<li>
<a class="dropdown-item" href="/what-is-flink/roadmap/">Roadmap</a>
</li>
<li>
<a class="dropdown-item" href="/what-is-flink/community/">Community & Project Info</a>
</li>
<li>
<a class="dropdown-item" href="/what-is-flink/security/">Security</a>
</li>
<li>
<a class="dropdown-item" href="/what-is-flink/special-thanks/">Special Thanks</a>
</li>
</ul>
</li>
<li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="#" role="button" data-bs-toggle="dropdown" aria-expanded="false">Getting Started</a>
<ul class="dropdown-menu">
<li>
<a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-docs-stable/docs/try-flink/local_installation/">With Flink<i class="link fa fa-external-link title" aria-hidden="true"></i>
</a>
</li>
<li>
<a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-kubernetes-operator-docs-stable/docs/try-flink-kubernetes-operator/quick-start/">With Flink Kubernetes Operator<i class="link fa fa-external-link title" aria-hidden="true"></i>
</a>
</li>
<li>
<a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable/docs/get-started/introduction/">With Flink CDC<i class="link fa fa-external-link title" aria-hidden="true"></i>
</a>
</li>
<li>
<a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-ml-docs-stable/docs/try-flink-ml/quick-start/">With Flink ML<i class="link fa fa-external-link title" aria-hidden="true"></i>
</a>
</li>
<li>
<a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-statefun-docs-stable/getting-started/project-setup.html">With Flink Stateful Functions<i class="link fa fa-external-link title" aria-hidden="true"></i>
</a>
</li>
<li>
<a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-docs-stable/docs/learn-flink/overview/">Training Course<i class="link fa fa-external-link title" aria-hidden="true"></i>
</a>
</li>
</ul>
</li>
<li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="#" role="button" data-bs-toggle="dropdown" aria-expanded="false">Documentation</a>
<ul class="dropdown-menu">
<li>
<a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-docs-stable/">Flink 1.19 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
</a>
</li>
<li>
<a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-docs-master/">Flink Master (snapshot)<i class="link fa fa-external-link title" aria-hidden="true"></i>
</a>
</li>
<li>
<a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-kubernetes-operator-docs-stable/">Kubernetes Operator 1.8 (latest)<i class="link fa fa-external-link title" aria-hidden="true"></i>
</a>
</li>
<li>
<a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-kubernetes-operator-docs-main">Kubernetes Operator Main (snapshot)<i class="link fa fa-external-link title" aria-hidden="true"></i>
</a>
</li>
<li>
<a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-stable">CDC 3.0 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
</a>
</li>
<li>
<a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-cdc-docs-master">CDC Master (snapshot)<i class="link fa fa-external-link title" aria-hidden="true"></i>
</a>
</li>
<li>
<a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-ml-docs-stable/">ML 2.3 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
</a>
</li>
<li>
<a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-ml-docs-master">ML Master (snapshot)<i class="link fa fa-external-link title" aria-hidden="true"></i>
</a>
</li>
<li>
<a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-statefun-docs-stable/">Stateful Functions 3.3 (stable)<i class="link fa fa-external-link title" aria-hidden="true"></i>
</a>
</li>
<li>
<a class="dropdown-item" href="https://nightlies.apache.org/flink/flink-statefun-docs-master">Stateful Functions Master (snapshot)<i class="link fa fa-external-link title" aria-hidden="true"></i>
</a>
</li>
</ul>
</li>
<li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="#" role="button" data-bs-toggle="dropdown" aria-expanded="false">How to Contribute</a>
<ul class="dropdown-menu">
<li>
<a class="dropdown-item" href="/how-to-contribute/overview/">Overview</a>
</li>
<li>
<a class="dropdown-item" href="/how-to-contribute/contribute-code/">Contribute Code</a>
</li>
<li>
<a class="dropdown-item" href="/how-to-contribute/reviewing-prs/">Review Pull Requests</a>
</li>
<li>
<a class="dropdown-item" href="/how-to-contribute/code-style-and-quality-preamble/">Code Style and Quality Guide</a>
</li>
<li>
<a class="dropdown-item" href="/how-to-contribute/contribute-documentation/">Contribute Documentation</a>
</li>
<li>
<a class="dropdown-item" href="/how-to-contribute/documentation-style-guide/">Documentation Style Guide</a>
</li>
<li>
<a class="dropdown-item" href="/how-to-contribute/improve-website/">Contribute to the Website</a>
</li>
<li>
<a class="dropdown-item" href="/how-to-contribute/getting-help/">Getting Help</a>
</li>
</ul>
</li>
<li class="nav-item">
<a class="nav-link" href="/posts/">Flink Blog</a>
</li>
<li class="nav-item">
<a class="nav-link" href="/downloads/">Downloads</a>
</li>
</ul>
<div class="book-search">
<div class="book-search-spinner hidden">
<i class="fa fa-refresh fa-spin"></i>
</div>
<form class="search-bar d-flex" onsubmit="return false;">
<input type="text" id="book-search-input" placeholder="Search" aria-label="Search" maxlength="64" data-hotkeys="s/">
<i class="fa fa-search search"></i>
<i class="fa fa-circle-o-notch fa-spin spinner"></i>
</form>
<div class="book-search-spinner hidden"></div>
<ul id="book-search-results"></ul>
</div>
</div>
</div>
</nav>
<div class="navbar-clearfix"></div>
</header>
<main class="flex">
<section class="container book-page">
<article class="markdown">
<h1>
<a href="/2020/02/11/apache-flink-1.10.0-release-announcement/">Apache Flink 1.10.0 Release Announcement</a>
</h1>
February 11, 2020 -
Marta Paes
<a href="https://twitter.com/morsapaes">(@morsapaes)</a>
<p>The Apache Flink community is excited to hit the double digits and announce the release of Flink 1.10.0! As a result of the biggest community effort to date, with over 1.2k issues implemented and more than 200 contributors, this release introduces significant improvements to the overall performance and stability of Flink jobs, a preview of native Kubernetes integration and great advances in Python support (PyFlink).</p>
<p>Flink 1.10 also marks the completion of the <a href="https://flink.apache.org/news/2019/08/22/release-1.9.0.html#preview-of-the-new-blink-sql-query-processor">Blink integration</a>, hardening streaming SQL and bringing mature batch processing to Flink with production-ready Hive integration and TPC-DS coverage. This blog post describes all major new features and improvements, important changes to be aware of and what to expect moving forward.</p>
<p>The binary distribution and source artifacts are now available on the updated <a href="/downloads.html">Downloads page</a> of the Flink website. For more details, check the complete <a href="https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12315522&amp;version=12345845">release changelog</a> and the <a href="//nightlies.apache.org/flink/flink-docs-release-1.10/">updated documentation</a>. We encourage you to download the release and share your feedback with the community through the <a href="https://flink.apache.org/community.html#mailing-lists">Flink mailing lists</a> or <a href="https://issues.apache.org/jira/projects/FLINK/summary">JIRA</a>.</p>
<h2 id="new-features-and-improvements">
New Features and Improvements
<a class="anchor" href="#new-features-and-improvements">#</a>
</h2>
<h3 id="improved-memory-management-and-configuration">
Improved Memory Management and Configuration
<a class="anchor" href="#improved-memory-management-and-configuration">#</a>
</h3>
<p>Before this release, the <code>TaskExecutor</code> memory configuration in Flink had some shortcomings that made it hard to reason about or optimize resource utilization, such as:</p>
<ul>
<li>
<p>Different configuration models for memory footprint in Streaming and Batch execution;</p>
</li>
<li>
<p>Complex and user-dependent configuration of off-heap state backends (i.e. RocksDB) in Streaming execution.</p>
</li>
</ul>
<p>To make memory options more explicit and intuitive to users, Flink 1.10 introduces significant changes to the <code>TaskExecutor</code> memory model and configuration logic (<a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-49%3A&#43;Unified&#43;Memory&#43;Configuration&#43;for&#43;TaskExecutors">FLIP-49</a>). These changes make Flink more adaptable to all kinds of deployment environments (e.g. Kubernetes, Yarn, Mesos), giving users strict control over its memory consumption.</p>
<p><strong>Managed Memory Extension</strong></p>
<p>Managed memory was extended to also account for memory usage of <code>RocksDBStateBackend</code>. While batch jobs can use either on-heap or off-heap memory, streaming jobs with <code>RocksDBStateBackend</code> can use off-heap memory only. Therefore, to allow users to switch between Streaming and Batch execution without having to modify cluster configurations, managed memory is now always off-heap.</p>
<p><strong>Simplified RocksDB Configuration</strong></p>
<p>Configuring an off-heap state backend like RocksDB used to involve a good deal of manual tuning, like decreasing the JVM heap size or setting Flink to use off-heap memory. This can now be achieved through Flink&rsquo;s out-of-the-box configuration, and adjusting the memory budget for <code>RocksDBStateBackend</code> is as simple as resizing the managed memory size.</p>
<p>Another important improvement was to allow Flink to bind RocksDB native memory usage (<a href="https://issues.apache.org/jira/browse/FLINK-7289">FLINK-7289</a>), preventing it from exceeding its total memory budget — this is especially relevant in containerized environments like Kubernetes. For details on how to enable and tune this feature, refer to <a href="//nightlies.apache.org/flink/flink-docs-release-1.10/ops/state/large_state_tuning.html#tuning-rocksdb">Tuning RocksDB</a>.</p>
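<p>As a hedged illustration of how these pieces fit together, the sketch below appends a minimal FLIP-49 setup to <code>flink-conf.yaml</code>; the values are placeholders rather than recommendations, and the full option set is described in the memory setup and RocksDB tuning pages linked on this page.</p>
<div class="highlight"><pre tabindex="0" class="chroma"><code class="language-bash" data-lang="bash"># A minimal sketch: FLIP-49 derives the individual pools (heap, network,
# managed, ...) from the total process size. Values are illustrative only.
cat &gt;&gt; conf/flink-conf.yaml &lt;&lt;'EOF'
# Total memory of the TaskExecutor process (JVM heap, off-heap, overhead).
taskmanager.memory.process.size: 4096m
# Managed memory as a fraction of total Flink memory; alternatively, set
# taskmanager.memory.managed.size for an absolute budget.
taskmanager.memory.managed.fraction: 0.4
# Bound RocksDB's block caches and write buffers by the managed memory
# budget, so it cannot exceed it (FLINK-7289).
state.backend.rocksdb.memory.managed: true
EOF
</code></pre></div>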
<p><span class="label label-danger">Note</span> FLIP-49 changes the process of cluster resource configuration, which may require tuning your clusters when upgrading from previous Flink versions. For a comprehensive overview of the changes and guidance on tuning, consult the <a href="//nightlies.apache.org/flink/flink-docs-release-1.10/ops/memory/mem_setup.html">memory setup documentation</a>.</p>
<h3 id="unified-logic-for-job-submission">
Unified Logic for Job Submission
<a class="anchor" href="#unified-logic-for-job-submission">#</a>
</h3>
<p>Prior to this release, job submission was part of the duties of the Execution Environments and closely tied to the different deployment targets (e.g. Yarn, Kubernetes, Mesos). This led to a poor separation of concerns and, over time, to a growing number of customized environments that users needed to configure and manage separately.</p>
<p>In Flink 1.10, job submission logic is abstracted into the generic <code>Executor</code> interface (<a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-73%3A&#43;Introducing&#43;Executors&#43;for&#43;job&#43;submission">FLIP-73</a>). The addition of the <code>ExecutorCLI</code> (<a href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=133631524">FLIP-81</a>) introduces a unified way to specify configuration parameters for <strong>any</strong> <a href="//nightlies.apache.org/flink/flink-docs-release-1.10/ops/cli.html#deployment-targets">execution target</a>. To round out this effort, the process of result retrieval was also decoupled from job submission with the introduction of a <code>JobClient</code> (<a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-74%3A&#43;Flink&#43;JobClient&#43;API">FLIP-74</a>), responsible for fetching the <code>JobExecutionResult</code>.</p>
<span>
<center>
<img vspace="8" style="width:100%" src="/img/blog/2020-02-11-release-1.10.0/flink_1.10_zeppelin.png" />
</center>
</span>
<p>In particular, these changes make it much easier to programmatically use Flink in downstream frameworks — for example, Apache Beam or Zeppelin interactive notebooks — by providing users with a unified entry point to Flink. For users working with Flink across multiple target environments, the transition to a configuration-based execution process also significantly reduces boilerplate code and maintenance overhead.</p>
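<p>As a hedged sketch of what this unification buys in practice, the commands below submit the same JAR to two different targets purely by switching the <code>-e</code> executor name and the target-specific <code>-D</code> options; the executor names follow the deployment targets documentation linked above, and the cluster id and memory value are hypothetical placeholders:</p>
<div class="highlight"><pre tabindex="0" class="chroma"><code class="language-bash" data-lang="bash"># Same application, different deployment targets: only the executor
# name and configuration options change, not the job code.
./bin/flink run -e yarn-per-job \
  -Dtaskmanager.memory.process.size=2048m \
  examples/streaming/WindowJoin.jar

./bin/flink run -e kubernetes-session \
  -Dkubernetes.cluster-id=my-session \
  examples/streaming/WindowJoin.jar
</code></pre></div>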
<h3 id="native-kubernetes-integration-beta">
Native Kubernetes Integration (Beta)
<a class="anchor" href="#native-kubernetes-integration-beta">#</a>
</h3>
<p>For users looking to get started with Flink on a containerized environment, deploying and managing a standalone cluster on top of Kubernetes requires some upfront knowledge about containers, operators and environment-specific tools like <code>kubectl</code>.</p>
<p>In Flink 1.10, we rolled out the first phase of <strong>Active Kubernetes Integration</strong> (<a href="https://jira.apache.org/jira/browse/FLINK-9953">FLINK-9953</a>) with support for session clusters (with per-job planned). In this context, “active” means that Flink’s ResourceManager (<code>K8sResMngr</code>) natively communicates with Kubernetes to allocate new pods on-demand, similar to Flink’s Yarn and Mesos integration. Users can also leverage namespaces to launch Flink clusters for multi-tenant environments with limited aggregate resource consumption. RBAC roles and service accounts with sufficient permissions should be configured beforehand.</p>
<span>
<center>
<img vspace="8" style="width:75%" src="/img/blog/2020-02-11-release-1.10.0/flink_1.10_nativek8s.png"/>
</center>
</span>
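<p>A session cluster must be running before jobs can be submitted to it. As a sketch (the cluster id and resource values are illustrative placeholders), a session can be started with:</p>
<div class="highlight"><pre tabindex="0" class="chroma"><code class="language-bash" data-lang="bash"># Start a Flink session cluster natively on Kubernetes; the ResourceManager
# requests TaskManager pods on demand as jobs arrive.
./bin/kubernetes-session.sh \
  -Dkubernetes.cluster-id=my-session \
  -Dtaskmanager.memory.process.size=4096m \
  -Dkubernetes.taskmanager.cpu=2 \
  -Dtaskmanager.numberOfTaskSlots=4
</code></pre></div>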
<p>As introduced in <a href="#unified-logic-for-job-submission">Unified Logic For Job Submission</a>, all command-line options in Flink 1.10 are mapped to a unified configuration. For this reason, users can simply refer to the Kubernetes config options and submit a job to an existing Flink session on Kubernetes in the CLI using:</p>
<div class="highlight"><pre tabindex="0" class="chroma"><code class="language-bash" data-lang="bash"><span class="line"><span class="cl">./bin/flink run -d -e kubernetes-session -Dkubernetes.cluster-id<span class="o">=</span>&lt;ClusterId&gt; examples/streaming/WindowJoin.jar
</span></span></code></pre></div><p>If you want to try out this preview feature, we encourage you to walk through the <a href="//nightlies.apache.org/flink/flink-docs-release-1.10/ops/deployment/native_kubernetes.html">Native Kubernetes setup</a>, play around with it and share feedback with the community.</p>
<h3 id="table-apisql-production-ready-hive-integration">
Table API/SQL: Production-ready Hive Integration
<a class="anchor" href="#table-apisql-production-ready-hive-integration">#</a>
</h3>
<p>Hive integration was announced as a preview feature in Flink 1.9. This preview allowed users to persist Flink-specific metadata (e.g. Kafka tables) in Hive Metastore using SQL DDL, call UDFs defined in Hive and use Flink for reading and writing Hive tables. Flink 1.10 rounds out this effort with further developments that bring production-ready Hive integration to Flink, with compatibility with <a href="//nightlies.apache.org/flink/flink-docs-release-1.10/dev/table/hive/#supported-hive-versions">most Hive versions</a>.</p>
<h4 id="native-partition-support-for-batch-sql">
Native Partition Support for Batch SQL
<a class="anchor" href="#native-partition-support-for-batch-sql">#</a>
</h4>
<p>Previously, only writes to non-partitioned Hive tables were supported. In Flink 1.10, the Flink SQL syntax has been extended with <code>INSERT OVERWRITE</code> and <code>PARTITION</code> (<a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-63%3A&#43;Rework&#43;table&#43;partition&#43;support">FLIP-63</a>), enabling users to write into both static and dynamic partitions in Hive.</p>
<p><strong>Static Partition Writing</strong></p>
<div class="highlight"><pre tabindex="0" class="chroma"><code class="language-sql" data-lang="sql"><span class="line"><span class="cl"><span class="k">INSERT</span><span class="w"> </span><span class="err">{</span><span class="w"> </span><span class="k">INTO</span><span class="w"> </span><span class="o">|</span><span class="w"> </span><span class="n">OVERWRITE</span><span class="w"> </span><span class="err">}</span><span class="w"> </span><span class="k">TABLE</span><span class="w"> </span><span class="n">tablename1</span><span class="w"> </span><span class="p">[</span><span class="n">PARTITION</span><span class="w"> </span><span class="p">(</span><span class="n">partcol1</span><span class="o">=</span><span class="n">val1</span><span class="p">,</span><span class="w"> </span><span class="n">partcol2</span><span class="o">=</span><span class="n">val2</span><span class="w"> </span><span class="p">...)]</span><span class="w"> </span><span class="n">select_statement1</span><span class="w"> </span><span class="k">FROM</span><span class="w"> </span><span class="n">from_statement</span><span class="p">;</span><span class="w">
</span></span></span></code></pre></div><p><strong>Dynamic Partition Writing</strong></p>
<div class="highlight"><pre tabindex="0" class="chroma"><code class="language-sql" data-lang="sql"><span class="line"><span class="cl"><span class="k">INSERT</span><span class="w"> </span><span class="err">{</span><span class="w"> </span><span class="k">INTO</span><span class="w"> </span><span class="o">|</span><span class="w"> </span><span class="n">OVERWRITE</span><span class="w"> </span><span class="err">}</span><span class="w"> </span><span class="k">TABLE</span><span class="w"> </span><span class="n">tablename1</span><span class="w"> </span><span class="n">select_statement1</span><span class="w"> </span><span class="k">FROM</span><span class="w"> </span><span class="n">from_statement</span><span class="p">;</span><span class="w">
</span></span></span></code></pre></div><p>Fully supporting partitioned tables allows users to take advantage of partition pruning on read, which significantly increases the performance of these operations by reducing the amount of data that needs to be scanned.</p>
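<p>As a concrete (hypothetical) example, assume a <code>page_views</code> table partitioned by a <code>ds</code> column. A static write names the target partition explicitly, while a dynamic write lets the partition values be derived from the trailing columns of the query:</p>
<div class="highlight"><pre tabindex="0" class="chroma"><code class="language-sql" data-lang="sql">-- Static partition: the target partition is fixed in the statement.
INSERT OVERWRITE TABLE page_views PARTITION (ds='2020-02-11')
SELECT user_id, url FROM staging_page_views;

-- Dynamic partition: the values for ds come from the last query column.
INSERT OVERWRITE TABLE page_views
SELECT user_id, url, ds FROM staging_page_views;
</code></pre></div>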
<h4 id="further-optimizations">
Further Optimizations
<a class="anchor" href="#further-optimizations">#</a>
</h4>
<p>Besides partition pruning, Flink 1.10 introduces more <a href="//nightlies.apache.org/flink/flink-docs-release-1.10/dev/table/hive/read_write_hive.html#optimizations">read optimizations</a> to Hive integration, such as the following (a query exercising them appears after the list):</p>
<ul>
<li>
<p><strong>Projection pushdown:</strong> Flink leverages projection pushdown to minimize data transfer between Flink and Hive tables by omitting unnecessary fields from table scans. This is especially beneficial for tables with a large number of columns.</p>
</li>
<li>
<p><strong>LIMIT pushdown:</strong> for queries with the <code>LIMIT</code> clause, Flink will limit the number of output records wherever possible to minimize the amount of data transferred across the network.</p>
</li>
<li>
<p><strong>ORC Vectorization on Read:</strong> to boost read performance for ORC files, Flink now uses the native ORC Vectorized Reader by default for Hive versions above 2.0.0 and columns with non-complex data types.</p>
</li>
</ul>
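<p>To illustrate, the query below (against the hypothetical <code>page_views</code> table from above) can benefit from several of these optimizations at once; if the underlying files are ORC and the Hive version is above 2.0.0, the vectorized reader applies as well:</p>
<div class="highlight"><pre tabindex="0" class="chroma"><code class="language-sql" data-lang="sql">-- Partition pruning: only the ds='2020-02-11' partition is scanned.
-- Projection pushdown: only user_id is read from the files.
-- LIMIT pushdown: at most 10 records are emitted by the source.
SELECT user_id
FROM page_views
WHERE ds = '2020-02-11'
LIMIT 10;
</code></pre></div>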
<h4 id="pluggable-modules-as-flink-system-objects-beta">
Pluggable Modules as Flink System Objects (Beta)
<a class="anchor" href="#pluggable-modules-as-flink-system-objects-beta">#</a>
</h4>
<p>Flink 1.10 introduces a generic mechanism for pluggable modules in the Flink table core, with a first focus on system functions (<a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-68%3A&#43;Extend&#43;Core&#43;Table&#43;System&#43;with&#43;Pluggable&#43;Modules">FLIP-68</a>). With modules, users can extend Flink’s system objects — for example, use Hive built-in functions so that they behave like Flink system functions. This release ships with a pre-implemented <code>HiveModule</code>, supporting multiple Hive versions, but users can also <a href="//nightlies.apache.org/flink/flink-docs-release-1.10/dev/table/modules.html">write their own pluggable modules</a>.</p>
<h3 id="other-improvements-to-the-table-apisql">
Other Improvements to the Table API/SQL
<a class="anchor" href="#other-improvements-to-the-table-apisql">#</a>
</h3>
<h4 id="watermarks-and-computed-columns-in-sql-ddl">
Watermarks and Computed Columns in SQL DDL
<a class="anchor" href="#watermarks-and-computed-columns-in-sql-ddl">#</a>
</h4>
<p>Flink 1.10 supports stream-specific syntax extensions to define time attributes and watermark generation in Flink SQL DDL (<a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-66%3A&#43;Support&#43;Time&#43;Attribute&#43;in&#43;SQL&#43;DDL">FLIP-66</a>). This allows time-based operations, like windowing, and the definition of <a href="//nightlies.apache.org/flink/flink-docs-release-1.10/dev/table/sql/create.html#create-table">watermark strategies</a> on tables created using DDL statements.</p>
<div class="highlight"><pre tabindex="0" class="chroma"><code class="language-sql" data-lang="sql"><span class="line"><span class="cl"><span class="k">CREATE</span><span class="w"> </span><span class="k">TABLE</span><span class="w"> </span><span class="k">table_name</span><span class="w"> </span><span class="p">(</span><span class="w">
</span></span></span><span class="line"><span class="cl"><span class="w">
</span></span></span><span class="line"><span class="cl"><span class="w"> </span><span class="n">WATERMARK</span><span class="w"> </span><span class="k">FOR</span><span class="w"> </span><span class="n">columnName</span><span class="w"> </span><span class="k">AS</span><span class="w"> </span><span class="o">&lt;</span><span class="n">watermark_strategy_expression</span><span class="o">&gt;</span><span class="w">
</span></span></span><span class="line"><span class="cl"><span class="w">
</span></span></span><span class="line"><span class="cl"><span class="w"></span><span class="p">)</span><span class="w"> </span><span class="k">WITH</span><span class="w"> </span><span class="p">(</span><span class="w">
</span></span></span><span class="line"><span class="cl"><span class="w"> </span><span class="p">...</span><span class="w">
</span></span></span><span class="line"><span class="cl"><span class="w"></span><span class="p">)</span><span class="w">
</span></span></span></code></pre></div><p>This release also introduces support for virtual computed columns (<a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-70%3A&#43;Flink&#43;SQL&#43;Computed&#43;Column&#43;Design">FLIP-70</a>) that can be derived based on other columns in the same table or deterministic expressions (i.e. literal values, UDFs and built-in functions). In Flink, computed columns are useful to define time attributes <a href="//nightlies.apache.org/flink/flink-docs-release-1.10/dev/table/sql/create.html#create-table">upon table creation</a>.</p>
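<p>Putting both features together, a table with an event-time attribute and a processing-time computed column could be declared as follows (table and column names are hypothetical, and the connector properties are elided):</p>
<div class="highlight"><pre tabindex="0" class="chroma"><code class="language-sql" data-lang="sql">CREATE TABLE user_actions (
  user_id STRING,
  action STRING,
  ts TIMESTAMP(3),
  -- Virtual computed column: a processing-time attribute (FLIP-70).
  proc_time AS PROCTIME(),
  -- Event-time attribute with a 5-second bounded-out-of-orderness watermark.
  WATERMARK FOR ts AS ts - INTERVAL '5' SECOND
) WITH (
  ...
)
</code></pre></div>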
<h4 id="additional-extensions-to-sql-ddl">
Additional Extensions to SQL DDL
<a class="anchor" href="#additional-extensions-to-sql-ddl">#</a>
</h4>
<p>There is now a clear distinction between temporary/persistent and system/catalog functions (<a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-57%3A&#43;Rework&#43;FunctionCatalog">FLIP-57</a>). This not only eliminates ambiguity in function reference, but also allows for deterministic function resolution order (i.e. in case of naming collision, system functions will precede catalog functions, with temporary functions taking precedence over persistent functions for both dimensions).</p>
<p>Following the groundwork in FLIP-57, we extended the SQL DDL syntax to support the creation of catalog functions, temporary functions and temporary system functions (<a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-79&#43;Flink&#43;Function&#43;DDL&#43;Support">FLIP-79</a>):</p>
<div class="highlight"><pre tabindex="0" class="chroma"><code class="language-sql" data-lang="sql"><span class="line"><span class="cl"><span class="k">CREATE</span><span class="w"> </span><span class="p">[</span><span class="k">TEMPORARY</span><span class="o">|</span><span class="k">TEMPORARY</span><span class="w"> </span><span class="k">SYSTEM</span><span class="p">]</span><span class="w"> </span><span class="k">FUNCTION</span><span class="w">
</span></span></span><span class="line"><span class="cl"><span class="w">
</span></span></span><span class="line"><span class="cl"><span class="w"> </span><span class="p">[</span><span class="k">IF</span><span class="w"> </span><span class="k">NOT</span><span class="w"> </span><span class="k">EXISTS</span><span class="p">]</span><span class="w"> </span><span class="p">[</span><span class="k">catalog_name</span><span class="p">.][</span><span class="n">db_name</span><span class="p">.]</span><span class="n">function_name</span><span class="w">
</span></span></span><span class="line"><span class="cl"><span class="w">
</span></span></span><span class="line"><span class="cl"><span class="w"></span><span class="k">AS</span><span class="w"> </span><span class="n">identifier</span><span class="w"> </span><span class="p">[</span><span class="k">LANGUAGE</span><span class="w"> </span><span class="n">JAVA</span><span class="o">|</span><span class="n">SCALA</span><span class="p">]</span><span class="w">
</span></span></span></code></pre></div><p>For a complete overview of the current state of DDL support in Flink SQL, check the <a href="//nightlies.apache.org/flink/flink-docs-release-1.10/dev/table/sql/">updated documentation</a>.</p>
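<p>For instance, a temporary system function backed by a (hypothetical) Java implementation class can be registered with:</p>
<div class="highlight"><pre tabindex="0" class="chroma"><code class="language-sql" data-lang="sql">-- Session-scoped function that takes precedence over built-in functions
-- with the same name; the implementing class is a placeholder.
CREATE TEMPORARY SYSTEM FUNCTION IF NOT EXISTS to_upper
AS 'com.example.udf.ToUpper' LANGUAGE JAVA
</code></pre></div>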
<p><span class="label label-danger">Note</span> In order to correctly handle and guarantee a consistent behavior across meta-objects (tables, views, functions) in the future, some object declaration methods in the Table API have been deprecated in favor of methods that are closer to standard SQL DDL (<a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-64%3A&#43;Support&#43;for&#43;Temporary&#43;Objects&#43;in&#43;Table&#43;module">FLIP-64</a>).</p>
<h4 id="full-tpc-ds-coverage-for-batch">
Full TPC-DS Coverage for Batch
<a class="anchor" href="#full-tpc-ds-coverage-for-batch">#</a>
</h4>
<p>TPC-DS is a widely used industry-standard decision support benchmark to evaluate and measure the performance of SQL-based data processing engines. In Flink 1.10, all TPC-DS queries are supported end-to-end (<a href="https://issues.apache.org/jira/browse/FLINK-11491">FLINK-11491</a>), reflecting the readiness of its SQL engine to address the needs of modern data warehouse-like workloads.</p>
<h3 id="pyflink-support-for-native-user-defined-functions-udfs">
PyFlink: Support for Native User Defined Functions (UDFs)
<a class="anchor" href="#pyflink-support-for-native-user-defined-functions-udfs">#</a>
</h3>
<p>A preview of PyFlink was introduced in the previous release, making headway towards the goal of full Python support in Flink. For this release, the focus was to enable users to register and use Python User-Defined Functions (UDF, with UDTF/UDAF planned) in the Table API/SQL (<a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-58%3A&#43;Flink&#43;Python&#43;User-Defined&#43;Stateless&#43;Function&#43;for&#43;Table">FLIP-58</a>).</p>
<span>
<center>
<img vspace="8" hspace="100" style="width:75%" src="/img/blog/2020-02-11-release-1.10.0/flink_1.10_pyflink.gif"/>
</center>
</span>
<p>If you are interested in the underlying implementation — leveraging Apache Beam’s <a href="https://beam.apache.org/roadmap/portability/">Portability Framework</a> — refer to the “Architecture” section of FLIP-58 and also to <a href="https://cwiki.apache.org/confluence/display/FLINK/FLIP-78%3A&#43;Flink&#43;Python&#43;UDF&#43;Environment&#43;and&#43;Dependency&#43;Management">FLIP-78</a>. These changes lay the required foundation for Pandas support and for PyFlink to eventually reach the DataStream API.</p>
<p>From Flink 1.10, users can also easily install PyFlink through <code>pip</code> using:</p>
<div class="highlight"><pre tabindex="0" class="chroma"><code class="language-bash" data-lang="bash"><span class="line"><span class="cl">pip install apache-flink
</span></span></code></pre></div><p>For a preview of other improvements planned for PyFlink, check <a href="https://issues.apache.org/jira/browse/FLINK-14500">FLINK-14500</a> and get involved in the <a href="http://apache-flink.147419.n8.nabble.com/Re-DISCUSS-What-parts-of-the-Python-API-should-we-focus-on-next-td1285.html">discussion</a> for requested user features.</p>
<h2 id="important-changes">
Important Changes
<a class="anchor" href="#important-changes">#</a>
</h2>
<ul>
<li>
<p>[<a href="https://issues.apache.org/jira/browse/FLINK-10725">FLINK-10725</a>] Flink can now be compiled and run on Java 11.</p>
</li>
<li>
<p>[<a href="https://jira.apache.org/jira/browse/FLINK-15495">FLINK-15495</a>] The Blink planner is now the default in the SQL Client, so that users can benefit from all the latest features and improvements. The switch from the old planner in the Table API is also planned for the next release, so we recommend that users start getting familiar with the Blink planner.</p>
</li>
<li>
<p>[<a href="https://issues.apache.org/jira/browse/FLINK-13025">FLINK-13025</a>] There is a <a href="//nightlies.apache.org/flink/flink-docs-release-1.10/dev/connectors/elasticsearch.html#elasticsearch-connector">new Elasticsearch sink connector</a>, fully supporting Elasticsearch 7.x versions.</p>
</li>
<li>
<p>[<a href="https://issues.apache.org/jira/browse/FLINK-15115">FLINK-15115</a>] The connectors for Kafka 0.8 and 0.9 have been marked as deprecated and will no longer be actively supported. If you are still using these versions or have any other related concerns, please reach out to the @dev mailing list.</p>
</li>
<li>
<p>[<a href="https://issues.apache.org/jira/browse/FLINK-14516">FLINK-14516</a>] The non-credit-based network flow control code was removed, along with the configuration option <code>taskmanager.network.credit.model</code>. Moving forward, Flink will always use credit-based flow control.</p>
</li>
<li>
<p>[<a href="https://issues.apache.org/jira/browse/FLINK-12122">FLINK-12122</a>] <a href="https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=65147077">FLIP-6</a> was rolled out with Flink 1.5.0 and introduced a code regression related to the way slots are allocated from <code>TaskManagers</code>. To use a scheduling strategy that is closer to the pre-FLIP behavior, where Flink tries to spread out the workload across all currently available <code>TaskManagers</code>, users can set <code>cluster.evenly-spread-out-slots: true</code> in the <code>flink-conf.yaml</code>.</p>
</li>
<li>
<p>[<a href="https://issues.apache.org/jira/browse/FLINK-11956">FLINK-11956</a>] <code>s3-hadoop</code> and <code>s3-presto</code> filesystems no longer use class relocations and should be loaded through <a href="//nightlies.apache.org/flink/flink-docs-release-1.10/ops/filesystems/#pluggable-file-systems">plugins</a>, but now seamlessly integrate with all credential providers. Other filesystems are strongly recommended to be used only as plugins, as we will continue to remove relocations.</p>
</li>
<li>
<p>Flink 1.9 shipped with a refactored Web UI, with the legacy one being kept around as backup in case something wasn’t working as expected. No issues have been reported so far, so <a href="http://apache-flink-mailing-list-archive.1008284.n3.nabble.com/DISCUSS-Remove-old-WebUI-td35218.html">the community voted</a> to drop the legacy Web UI in Flink 1.10.</p>
</li>
</ul>
<h2 id="release-notes">
Release Notes
<a class="anchor" href="#release-notes">#</a>
</h2>
<p>Please review the <a href="//nightlies.apache.org/flink/flink-docs-release-1.10/release-notes/flink-1.10.html">release notes</a> carefully for a detailed list of changes and new features if you plan to upgrade your setup to Flink 1.10. This version is API-compatible with previous 1.x releases for APIs annotated with the <code>@Public</code> annotation.</p>
<h2 id="list-of-contributors">
List of Contributors
<a class="anchor" href="#list-of-contributors">#</a>
</h2>
<p>The Apache Flink community would like to thank all contributors that have made this release possible:</p>
<p>Achyuth Samudrala, Aitozi, Alberto Romero, Alec.Ch, Aleksey Pak, Alexander Fedulov, Alice Yan, Aljoscha Krettek, Aloys, Andrey Zagrebin, Arvid Heise, Benchao Li, Benoit Hanotte, Benoît Paris, Bhagavan Das, Biao Liu, Chesnay Schepler, Congxian Qiu, Cyrille Chépélov, César Soto Valero, David Anderson, David Hrbacek, David Moravek, Dawid Wysakowicz, Dezhi Cai, Dian Fu, Dyana Rose, Eamon Taaffe, Fabian Hueske, Fawad Halim, Fokko Driesprong, Frey Gao, Gabor Gevay, Gao Yun, Gary Yao, GatsbyNewton, GitHub, Grebennikov Roman, GuoWei Ma, Gyula Fora, Haibo Sun, Hao Dang, Henvealf, Hongtao Zhang, HuangXingBo, Hwanju Kim, Igal Shilman, Jacob Sevart, Jark Wu, Jeff Martin, Jeff Yang, Jeff Zhang, Jiangjie (Becket) Qin, Jiayi, Jiayi Liao, Jincheng Sun, Jing Zhang, Jingsong Lee, JingsongLi, Joao Boto, John Lonergan, Kaibo Zhou, Konstantin Knauf, Kostas Kloudas, Kurt Young, Leonard Xu, Ling Wang, Lining Jing, Liupengcheng, LouisXu, Mads Chr. Olesen, Marco Zühlke, Marcos Klein, Matyas Orhidi, Maximilian Bode, Maximilian Michels, Nick Pavlakis, Nico Kruber, Nicolas Deslandes, Pablo Valtuille, Paul Lam, Paul Lin, PengFei Li, Piotr Nowojski, Piotr Przybylski, Piyush Narang, Ricco Chen, Richard Deurwaarder, Robert Metzger, Roman, Roman Grebennikov, Roman Khachatryan, Rong Rong, Rui Li, Ryan Tao, Scott Kidder, Seth Wiesman, Shannon Carey, Shaobin.Ou, Shuo Cheng, Stefan Richter, Stephan Ewen, Steve OU, Steven Wu, Terry Wang, Thesharing, Thomas Weise, Till Rohrmann, Timo Walther, Tony Wei, TsReaper, Tzu-Li (Gordon) Tai, Victor Wong, WangHengwei, Wei Zhong, WeiZhong94, Wind (Jiayi Liao), Xintong Song, XuQianJin-Stars, Xuefu Zhang, Xupingyong, Yadong Xie, Yang Wang, Yangze Guo, Yikun Jiang, Ying, YngwieWang, Yu Li, Yuan Mei, Yun Gao, Yun Tang, Zhanchun Zhang, Zhenghua Gao, Zhijiang, Zhu Zhu, a-suiniaev, azagrebin, beyond1920, biao.liub, blueszheng, bowen.li, caoyingjie, catkint, chendonglin, chenqi, chunpinghe, cyq89051127, danrtsey.wy, dengziming, dianfu, eskabetxe, fanrui, forideal, gentlewang, godfrey he, godfreyhe, haodang, hehuiyuan, hequn8128, hpeter, huangxingbo, huzheng, ifndef-SleePy, jiemotongxue, joe, jrthe42, kevin.cyj, klion26, lamber-ken, libenchao, liketic, lincoln-lil, lining, liuyongvs, liyafan82, lz, mans2singh, mojo, openinx, ouyangwulin, shining-huang, shuai-xu, shuo.cs, stayhsfLee, sunhaibotb, sunjincheng121, tianboxiu, tianchen, tianchen92, tison, tszkitlo40, unknown, vinoyang, vthinkxie, wangpeibin, wangxiaowei, wangxiyuan, wangxlong, wangyang0918, whlwanghailong, xuchao0903, xuyang1706, yanghua, yangjf2019, yongqiang chai, yuzhao.cyz, zentol, zhangzhanchum, zhengcanbin, zhijiang, zhongyong jin, zhuzhu.zz, zjuwangg, zoudaokoulife, 砚田, 谢磊, 张志豪, 曹建华</p>
</article>
<div class="edit-this-page">
<p>
<a href="https://cwiki.apache.org/confluence/display/FLINK/Flink+Translation+Specifications">Want to contribute translation?</a>
</p>
<p>
<a href="//github.com/apache/flink-web/edit/asf-site/docs/content/posts/2020-02-11-release-1.10.0.md">
Edit This Page<i class="fa fa-edit fa-fw"></i>
</a>
</p>
</div>
</section>
<aside class="book-toc">
<nav id="TableOfContents"><h3>On This Page <a href="javascript:void(0)" class="toc" onclick="collapseToc()"><i class="fa fa-times" aria-hidden="true"></i></a></h3>
<ul>
<li>
<ul>
<li><a href="#new-features-and-improvements">New Features and Improvements</a>
<ul>
<li><a href="#improved-memory-management-and-configuration">Improved Memory Management and Configuration</a></li>
<li><a href="#unified-logic-for-job-submission">Unified Logic for Job Submission</a></li>
<li><a href="#native-kubernetes-integration-beta">Native Kubernetes Integration (Beta)</a></li>
<li><a href="#table-apisql-production-ready-hive-integration">Table API/SQL: Production-ready Hive Integration</a></li>
<li><a href="#other-improvements-to-the-table-apisql">Other Improvements to the Table API/SQL</a></li>
<li><a href="#pyflink-support-for-native-user-defined-functions-udfs">PyFlink: Support for Native User Defined Functions (UDFs)</a></li>
</ul>
</li>
<li><a href="#important-changes">Important Changes</a></li>
<li><a href="#release-notes">Release Notes</a></li>
<li><a href="#list-of-contributors">List of Contributors</a></li>
</ul>
</li>
</ul>
</nav>
</aside>
<aside class="expand-toc hidden">
<a class="toc" onclick="expandToc()" href="javascript:void(0)">
<i class="fa fa-bars" aria-hidden="true"></i>
</a>
</aside>
</main>
<footer>
<div class="separator"></div>
<div class="panels">
<div class="wrapper">
<div class="panel">
<ul>
<li>
<a href="https://flink-packages.org/">flink-packages.org</a>
</li>
<li>
<a href="https://www.apache.org/">Apache Software Foundation</a>
</li>
<li>
<a href="https://www.apache.org/licenses/">License</a>
</li>
<li>
<a href="/zh/">
<i class="fa fa-globe" aria-hidden="true"></i>&nbsp;中文版
</a>
</li>
</ul>
</div>
<div class="panel">
<ul>
<li>
<a href="/what-is-flink/security">Security</a-->
</li>
<li>
<a href="https://www.apache.org/foundation/sponsorship.html">Donate</a>
</li>
<li>
<a href="https://www.apache.org/foundation/thanks.html">Thanks</a>
</li>
</ul>
</div>
<div class="panel icons">
<div>
<a href="/posts">
<div class="icon flink-blog-icon"></div>
<span>Flink blog</span>
</a>
</div>
<div>
<a href="https://github.com/apache/flink">
<div class="icon flink-github-icon"></div>
<span>Github</span>
</a>
</div>
<div>
<a href="https://twitter.com/apacheflink">
<div class="icon flink-twitter-icon"></div>
<span>Twitter</span>
</a>
</div>
</div>
</div>
</div>
<hr/>
<div class="container disclaimer">
<p>The contents of this website are © 2024 Apache Software Foundation under the terms of the Apache License v2. Apache Flink, Flink, and the Flink logo are either registered trademarks or trademarks of The Apache Software Foundation in the United States and other countries.</p>
</div>
</footer>
</body>
</html>