<!DOCTYPE html>
<html class="no-js">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Migration Guide: Structured Streaming - Spark 4.1.0-preview1 Documentation</title>
<link rel="stylesheet" href="../css/bootstrap.min.css">
<link rel="preconnect" href="https://fonts.googleapis.com">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link href="https://fonts.googleapis.com/css2?family=DM+Sans:ital,wght@0,400;0,500;0,700;1,400;1,500;1,700&Courier+Prime:wght@400;700&display=swap" rel="stylesheet">
<link href="../css/custom.css" rel="stylesheet">
<script src="../js/vendor/modernizr-2.6.1-respond-1.1.0.min.js"></script>
<link rel="stylesheet" href="../css/pygments-default.css">
<link rel="stylesheet" href="../css/docsearch.min.css" />
<link rel="stylesheet" href="../css/docsearch.css">
<!-- Matomo -->
<script>
var _paq = window._paq = window._paq || [];
/* tracker methods like "setCustomDimension" should be called before "trackPageView" */
_paq.push(["disableCookies"]);
_paq.push(['trackPageView']);
_paq.push(['enableLinkTracking']);
(function() {
var u="https://analytics.apache.org/";
_paq.push(['setTrackerUrl', u+'matomo.php']);
_paq.push(['setSiteId', '40']);
var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
g.async=true; g.src=u+'matomo.js'; s.parentNode.insertBefore(g,s);
})();
</script>
<!-- End Matomo Code -->
</head>
<body class="global">
<!-- This code is taken from http://twitter.github.com/bootstrap/examples/hero.html -->
<nav class="navbar navbar-expand-lg navbar-dark p-0 px-4 fixed-top" style="background: #1d6890;" id="topbar">
<div class="navbar-brand"><a href="../index.html">
<img src="https://spark.apache.org/images/spark-logo-rev.svg" width="141" height="72"/></a><span class="version">4.1.0-preview1</span>
</div>
<button class="navbar-toggler" type="button" data-toggle="collapse"
data-target="#navbarCollapse" aria-controls="navbarCollapse"
aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarCollapse">
<ul class="navbar-nav me-auto">
<li class="nav-item"><a href="../index.html" class="nav-link">Overview</a></li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarQuickStart" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Programming Guides</a>
<div class="dropdown-menu" aria-labelledby="navbarQuickStart">
<a class="dropdown-item" href="../quick-start.html">Quick Start</a>
<a class="dropdown-item" href="../rdd-programming-guide.html">RDDs, Accumulators, Broadcasts Vars</a>
<a class="dropdown-item" href="../sql-programming-guide.html">SQL, DataFrames, and Datasets</a>
<a class="dropdown-item" href="../streaming/index.html">Structured Streaming</a>
<a class="dropdown-item" href="../streaming-programming-guide.html">Spark Streaming (DStreams)</a>
<a class="dropdown-item" href="../ml-guide.html">MLlib (Machine Learning)</a>
<a class="dropdown-item" href="../graphx-programming-guide.html">GraphX (Graph Processing)</a>
<a class="dropdown-item" href="../sparkr.html">SparkR (R on Spark)</a>
<a class="dropdown-item" href="../api/python/getting_started/index.html">PySpark (Python on Spark)</a>
<a class="dropdown-item" href="../declarative-pipelines-programming-guide.html">Declarative Pipelines</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarAPIDocs" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">API Docs</a>
<div class="dropdown-menu" aria-labelledby="navbarAPIDocs">
<a class="dropdown-item" href="../api/python/index.html">Python</a>
<a class="dropdown-item" href="../api/scala/org/apache/spark/index.html">Scala</a>
<a class="dropdown-item" href="../api/java/index.html">Java</a>
<a class="dropdown-item" href="../api/R/index.html">R</a>
<a class="dropdown-item" href="../api/sql/index.html">SQL, Built-in Functions</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarDeploying" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Deploying</a>
<div class="dropdown-menu" aria-labelledby="navbarDeploying">
<a class="dropdown-item" href="../cluster-overview.html">Overview</a>
<a class="dropdown-item" href="../submitting-applications.html">Submitting Applications</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="../spark-standalone.html">Spark Standalone</a>
<a class="dropdown-item" href="../running-on-yarn.html">YARN</a>
<a class="dropdown-item" href="../running-on-kubernetes.html">Kubernetes</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarMore" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">More</a>
<div class="dropdown-menu" aria-labelledby="navbarMore">
<a class="dropdown-item" href="../configuration.html">Configuration</a>
<a class="dropdown-item" href="../monitoring.html">Monitoring</a>
<a class="dropdown-item" href="../tuning.html">Tuning Guide</a>
<a class="dropdown-item" href="../job-scheduling.html">Job Scheduling</a>
<a class="dropdown-item" href="../security.html">Security</a>
<a class="dropdown-item" href="../hardware-provisioning.html">Hardware Provisioning</a>
<a class="dropdown-item" href="../migration-guide.html">Migration Guide</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="../building-spark.html">Building Spark</a>
<a class="dropdown-item" href="https://spark.apache.org/contributing.html">Contributing to Spark</a>
<a class="dropdown-item" href="https://spark.apache.org/third-party-projects.html">Third Party Projects</a>
</div>
</li>
<li class="nav-item">
<input type="text" id="docsearch-input" placeholder="Search the docs…">
</li>
</ul>
<!--<span class="navbar-text navbar-right"><span class="version-text">v4.1.0-preview1</span></span>-->
</div>
</nav>
<div class="container">
<div class="left-menu-wrapper">
<div class="left-menu">
<h3><a href="../streaming/index.html">Structured Streaming Programming Guide</a></h3>
<ul>
<li>
<a href="../streaming/index.html">
Overview
</a>
</li>
<li>
<a href="../streaming/getting-started.html">
Getting Started
</a>
</li>
<li>
<a href="../streaming/apis-on-dataframes-and-datasets.html">
APIs on DataFrames and Datasets
</a>
</li>
<li>
<a href="../streaming/performance-tips.html">
Performance Tips
</a>
</li>
<li>
<a href="../streaming/additional-information.html">
Additional Information
</a>
</li>
</ul>
</div>
</div>
<input id="nav-trigger" class="nav-trigger" checked type="checkbox">
<label for="nav-trigger"></label>
<div class="content-with-sidebar mr-3" id="content">
<h1 class="title">Migration Guide: Structured Streaming</h1>
<p>Note that this migration guide describes the items specific to Structured Streaming.
Many items from the SQL migration guide also apply when migrating Structured Streaming to a higher version.
Please refer to the <a href="../sql-migration-guide.html">Migration Guide: SQL, Datasets and DataFrame</a>.</p>
<h2 id="upgrading-from-structured-streaming-35-to-40">Upgrading from Structured Streaming 3.5 to 4.0</h2>
<ul>
<li>Since Spark 4.0, Spark falls back to single-batch execution if any source in the query does not support <code class="language-plaintext highlighter-rouge">Trigger.AvailableNow</code>. This avoids possible correctness, duplication, and data loss issues caused by incompatibility between the source and the wrapper implementation. (See <a href="https://issues.apache.org/jira/browse/SPARK-45178">SPARK-45178</a> for more details.)</li>
<li>Since Spark 4.0, the new configuration <code class="language-plaintext highlighter-rouge">spark.sql.streaming.ratioExtraSpaceAllowedInCheckpoint</code> (default: <code class="language-plaintext highlighter-rouge">0.3</code>) controls the amount of additional space allowed in the checkpoint directory to store stale version files for batched deletion by the maintenance task. This amortizes the cost of listing in cloud stores. Setting this to <code class="language-plaintext highlighter-rouge">0</code> restores the old behavior, as shown in the first sketch after this list. (See <a href="https://issues.apache.org/jira/browse/SPARK-48931">SPARK-48931</a> for more details.)</li>
<li>Since Spark 4.0, when a relative path is used to output data in <code class="language-plaintext highlighter-rouge">DataStreamWriter</code>, the resolution to an absolute path is done on the Spark driver rather than deferred to the Spark executor; see the second sketch after this list. This makes Structured Streaming behavior consistent with the DataFrame API (<code class="language-plaintext highlighter-rouge">DataFrameWriter</code>). (See <a href="https://issues.apache.org/jira/browse/SPARK-50854">SPARK-50854</a> for more details.)</li>
</ul>
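<p>A minimal sketch of restoring the pre-4.0 checkpoint maintenance behavior; it assumes an active <code class="language-plaintext highlighter-rouge">SparkSession</code> named <code class="language-plaintext highlighter-rouge">spark</code>:</p>
<div class="language-scala highlighter-rouge"><div class="highlight"><pre class="highlight"><code>// Delete stale checkpoint version files eagerly instead of allowing up to
// 30% extra space in the checkpoint directory (the Spark 4.0 default of 0.3).
spark.conf.set("spark.sql.streaming.ratioExtraSpaceAllowedInCheckpoint", "0")
</code></pre></div></div>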
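<p>To illustrate the path resolution change, the sketch below starts a query with a relative output path; since Spark 4.0 that path is resolved on the driver before the query runs. The <code class="language-plaintext highlighter-rouge">rate</code> source and the checkpoint and output paths are placeholders:</p>
<div class="language-scala highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().appName("relative-path-example").getOrCreate()
val df = spark.readStream.format("rate").load()

// "output-dir" is relative; since Spark 4.0 it is resolved against the
// driver's working directory, matching DataFrameWriter, instead of being
// resolved on the executor.
val query = df.writeStream
  .format("parquet")
  .option("checkpointLocation", "/tmp/checkpoints/relative-path-example")
  .start("output-dir")
</code></pre></div></div>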
<h2 id="upgrading-from-structured-streaming-33-to-34">Upgrading from Structured Streaming 3.3 to 3.4</h2>
<ul>
<li>
<p>Since Spark 3.4, <code class="language-plaintext highlighter-rouge">Trigger.Once</code> is deprecated, and users are encouraged to migrate from <code class="language-plaintext highlighter-rouge">Trigger.Once</code> to <code class="language-plaintext highlighter-rouge">Trigger.AvailableNow</code>, as in the first sketch after this list. Please refer to <a href="https://issues.apache.org/jira/browse/SPARK-39805">SPARK-39805</a> for more details.</p>
</li>
<li>
<p>Since Spark 3.4, the default value of the configuration for Kafka offset fetching (<code class="language-plaintext highlighter-rouge">spark.sql.streaming.kafka.useDeprecatedOffsetFetching</code>) is changed from <code class="language-plaintext highlighter-rouge">true</code> to <code class="language-plaintext highlighter-rouge">false</code>. The default no longer relies on consumer-group-based scheduling, which affects the required ACLs; the old behavior can be restored as in the second sketch after this list. For further details please see <a href="structured-streaming-kafka-integration.html#offset-fetching">Structured Streaming Kafka Integration</a>.</p>
</li>
</ul>
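<p>A minimal before/after sketch of the trigger migration; the <code class="language-plaintext highlighter-rouge">rate</code> source and <code class="language-plaintext highlighter-rouge">console</code> sink are placeholders:</p>
<div class="language-scala highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.Trigger

val spark = SparkSession.builder().appName("trigger-migration").getOrCreate()
val df = spark.readStream.format("rate").load()

// Before (deprecated): all available data is processed in a single batch.
//   df.writeStream.format("console").trigger(Trigger.Once()).start()

// After: all available data is processed, possibly across multiple batches,
// and then the query stops.
val query = df.writeStream
  .format("console")
  .trigger(Trigger.AvailableNow())
  .start()
</code></pre></div></div>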
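<p>If the ACLs in your environment still require consumer-group-based offset fetching, the old behavior can be restored as in this sketch; the broker address and topic are placeholders, and an active <code class="language-plaintext highlighter-rouge">SparkSession</code> named <code class="language-plaintext highlighter-rouge">spark</code> is assumed:</p>
<div class="language-scala highlighter-rouge"><div class="highlight"><pre class="highlight"><code>// Restore the pre-3.4 default: consumer-group based offset fetching.
spark.conf.set("spark.sql.streaming.kafka.useDeprecatedOffsetFetching", "true")

val kafkaStream = spark.readStream
  .format("kafka")
  .option("kafka.bootstrap.servers", "host1:9092") // placeholder broker
  .option("subscribe", "events")                   // placeholder topic
  .load()
</code></pre></div></div>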
<h2 id="upgrading-from-structured-streaming-32-to-33">Upgrading from Structured Streaming 3.2 to 3.3</h2>
<ul>
<li>Since Spark 3.3, all stateful operators require hash partitioning with exact grouping keys. In previous versions, all stateful operators except stream-stream join required looser partitioning criteria, which opened the possibility of correctness issues. (See <a href="https://issues.apache.org/jira/browse/SPARK-38204">SPARK-38204</a> for more details.) To ensure backward compatibility, the old behavior is retained for checkpoints built by older versions.</li>
</ul>
<h2 id="upgrading-from-structured-streaming-30-to-31">Upgrading from Structured Streaming 3.0 to 3.1</h2>
<ul>
<li>
<p>In Spark 3.0 and earlier, for queries with a stateful operation that can emit rows older than the current watermark plus the allowed late record delay (such rows are &#8220;late rows&#8221; in downstream stateful operations and may be discarded), Spark only prints a warning message. Since Spark 3.1, Spark checks such queries for possible correctness issues and throws an AnalysisException by default. Users who understand the possible risk of correctness issues and still decide to run the query can disable this check by setting the config <code class="language-plaintext highlighter-rouge">spark.sql.streaming.statefulOperator.checkCorrectness.enabled</code> to <code class="language-plaintext highlighter-rouge">false</code>, as in the first sketch after this list.</p>
</li>
<li>
<p>In Spark 3.0 and earlier, Spark uses <code class="language-plaintext highlighter-rouge">KafkaConsumer</code> for offset fetching, which can cause an infinite wait in the driver.
In Spark 3.1, a new configuration option, <code class="language-plaintext highlighter-rouge">spark.sql.streaming.kafka.useDeprecatedOffsetFetching</code> (default: <code class="language-plaintext highlighter-rouge">true</code>), was added;
it can be set to <code class="language-plaintext highlighter-rouge">false</code> to let Spark use a new offset fetching mechanism based on <code class="language-plaintext highlighter-rouge">AdminClient</code>, as in the second sketch after this list.
For further details please see <a href="structured-streaming-kafka-integration.html#offset-fetching">Structured Streaming Kafka Integration</a>.</p>
</li>
</ul>
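<p>A minimal sketch of disabling the correctness check, assuming an active <code class="language-plaintext highlighter-rouge">SparkSession</code> named <code class="language-plaintext highlighter-rouge">spark</code>; only do this if you accept the risk described above:</p>
<div class="language-scala highlighter-rouge"><div class="highlight"><pre class="highlight"><code>// Allow queries that may emit late rows to downstream stateful operators,
// instead of failing them with AnalysisException (the Spark 3.1+ default).
spark.conf.set("spark.sql.streaming.statefulOperator.checkCorrectness.enabled", "false")
</code></pre></div></div>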
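<p>Opting in to the <code class="language-plaintext highlighter-rouge">AdminClient</code>-based offset fetching in Spark 3.1 is a one-line configuration change, sketched below under the same assumption:</p>
<div class="language-scala highlighter-rouge"><div class="highlight"><pre class="highlight"><code>// Use the AdminClient-based offset fetching instead of the KafkaConsumer-based
// mechanism, which could wait indefinitely in the driver.
spark.conf.set("spark.sql.streaming.kafka.useDeprecatedOffsetFetching", "false")
</code></pre></div></div>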
<h2 id="upgrading-from-structured-streaming-24-to-30">Upgrading from Structured Streaming 2.4 to 3.0</h2>
<ul>
<li>
<p>In Spark 3.0, Structured Streaming forces the source schema to be nullable when file-based data sources such as text, json, csv, parquet and orc are used via <code class="language-plaintext highlighter-rouge">spark.readStream(...)</code>. Previously, it respected the nullability of the source schema; however, this caused issues with NPEs that were tricky to debug. To restore the previous behavior, set <code class="language-plaintext highlighter-rouge">spark.sql.streaming.fileSource.schema.forceNullable</code> to <code class="language-plaintext highlighter-rouge">false</code>, as in the first sketch after this list.</p>
</li>
<li>
<p>Spark 3.0 fixes a correctness issue in stream-stream outer join, which changes the schema of state. (See <a href="https://issues.apache.org/jira/browse/SPARK-26154">SPARK-26154</a> for more details.) If you start your query from a checkpoint constructed by Spark 2.x that uses stream-stream outer join, Spark 3.0 fails the query. To recalculate outputs, discard the checkpoint and replay previous inputs.</p>
</li>
<li>
<p>In Spark 3.0, the deprecated class <code class="language-plaintext highlighter-rouge">org.apache.spark.sql.streaming.ProcessingTime</code> has been removed. Use <code class="language-plaintext highlighter-rouge">org.apache.spark.sql.streaming.Trigger.ProcessingTime</code> instead. Likewise, <code class="language-plaintext highlighter-rouge">org.apache.spark.sql.execution.streaming.continuous.ContinuousTrigger</code> has been removed in favor of <code class="language-plaintext highlighter-rouge">Trigger.Continuous</code>, and <code class="language-plaintext highlighter-rouge">org.apache.spark.sql.execution.streaming.OneTimeTrigger</code> has been hidden in favor of <code class="language-plaintext highlighter-rouge">Trigger.Once</code>. The second sketch after this list shows the replacement.</p>
</li>
</ul>
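<p>A minimal sketch of restoring the pre-3.0 nullability behavior for file-based streaming sources, assuming an active <code class="language-plaintext highlighter-rouge">SparkSession</code> named <code class="language-plaintext highlighter-rouge">spark</code>:</p>
<div class="language-scala highlighter-rouge"><div class="highlight"><pre class="highlight"><code>// Respect the nullability of the user-specified schema instead of forcing all
// columns to be nullable. Note this may reintroduce hard-to-debug NPEs.
spark.conf.set("spark.sql.streaming.fileSource.schema.forceNullable", "false")
</code></pre></div></div>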
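<p>The trigger replacement looks like the sketch below; the <code class="language-plaintext highlighter-rouge">rate</code> source and <code class="language-plaintext highlighter-rouge">console</code> sink are placeholders:</p>
<div class="language-scala highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.Trigger

val spark = SparkSession.builder().appName("trigger-classes").getOrCreate()
val df = spark.readStream.format("rate").load()

// Before Spark 3.0 (class removed):
//   import org.apache.spark.sql.streaming.ProcessingTime
//   df.writeStream.trigger(ProcessingTime("10 seconds")).start()

// Spark 3.0 and later:
val query = df.writeStream
  .format("console")
  .trigger(Trigger.ProcessingTime("10 seconds"))
  .start()
</code></pre></div></div>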
</div>
<!-- /container -->
</div>
<script src="../js/vendor/jquery-3.5.1.min.js"></script>
<script src="../js/vendor/bootstrap.bundle.min.js"></script>
<script src="../js/vendor/anchor.min.js"></script>
<script src="../js/main.js"></script>
<script type="text/javascript" src="../js/vendor/docsearch.min.js"></script>
<script type="text/javascript">
// DocSearch is entirely free and automated. DocSearch is built in two parts:
// 1. a crawler which we run on our own infrastructure every 24 hours. It follows every link
// in your website and extracts content from every page it traverses. It then pushes this
// content to an Algolia index.
// 2. a JavaScript snippet to be inserted in your website that will bind this Algolia index
// to your search input and display its results in a dropdown UI. If you want to find more
// details on how DocSearch works, check the docs of DocSearch.
docsearch({
apiKey: 'd62f962a82bc9abb53471cb7b89da35e',
appId: 'RAI69RXRSK',
indexName: 'apache_spark',
inputSelector: '#docsearch-input',
enhancedSearchInput: true,
algoliaOptions: {
'facetFilters': ["version:4.1.0-preview1"]
},
debug: false // Set debug to true if you want to inspect the dropdown
});
</script>
<!-- MathJax Section -->
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
TeX: { equationNumbers: { autoNumber: "AMS" } }
});
</script>
<script>
// Note that we load MathJax this way to work with local file (file://), HTTP and HTTPS.
// We could use "//cdn.mathjax...", but that won't support "file://".
(function(d, script) {
script = d.createElement('script');
script.type = 'text/javascript';
script.async = true;
script.onload = function(){
MathJax.Hub.Config({
tex2jax: {
inlineMath: [ ["$", "$"], ["\\\\(","\\\\)"] ],
displayMath: [ ["$$","$$"], ["\\[", "\\]"] ],
processEscapes: true,
skipTags: ['script', 'noscript', 'style', 'textarea', 'pre']
}
});
};
script.src = ('https:' == document.location.protocol ? 'https://' : 'http://') +
'cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js' +
'?config=TeX-AMS-MML_HTMLorMML';
d.getElementsByTagName('head')[0].appendChild(script);
}(document));
</script>
</body>
</html>