<!DOCTYPE html>
<!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7"> <![endif]-->
<!--[if IE 7]> <html class="no-js lt-ie9 lt-ie8"> <![endif]-->
<!--[if IE 8]> <html class="no-js lt-ie9"> <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js"> <!--<![endif]-->
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title>Migration Guide: SparkR (R on Spark) - Spark 3.4.3 Documentation</title>
<link rel="stylesheet" href="css/bootstrap.min.css">
<style>
body {
padding-top: 60px;
padding-bottom: 40px;
}
</style>
<meta name="viewport" content="width=device-width">
<link rel="stylesheet" href="css/main.css">
<script src="js/vendor/modernizr-2.6.1-respond-1.1.0.min.js"></script>
<link rel="stylesheet" href="css/pygments-default.css">
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.css" />
<link rel="stylesheet" href="css/docsearch.css">
<!-- Matomo -->
<script>
var _paq = window._paq = window._paq || [];
/* tracker methods like "setCustomDimension" should be called before "trackPageView" */
_paq.push(["disableCookies"]);
_paq.push(['trackPageView']);
_paq.push(['enableLinkTracking']);
(function() {
var u="https://analytics.apache.org/";
_paq.push(['setTrackerUrl', u+'matomo.php']);
_paq.push(['setSiteId', '40']);
var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
g.async=true; g.src=u+'matomo.js'; s.parentNode.insertBefore(g,s);
})();
</script>
<!-- End Matomo Code -->
</head>
<body>
<!--[if lt IE 7]>
<p class="chromeframe">You are using an outdated browser. <a href="https://browsehappy.com/">Upgrade your browser today</a> or <a href="http://www.google.com/chromeframe/?redirect=true">install Google Chrome Frame</a> to better experience this site.</p>
<![endif]-->
<!-- This code is taken from http://twitter.github.com/bootstrap/examples/hero.html -->
<nav class="navbar fixed-top navbar-expand-md navbar-light bg-light" id="topbar">
<div class="container">
<div class="navbar-header">
<div class="navbar-brand"><a href="index.html">
<img src="img/spark-logo-hd.png" style="height:50px;"/></a><span class="version">3.4.3</span>
</div>
</div>
<button class="navbar-toggler" type="button" data-toggle="collapse"
data-target="#navbarCollapse" aria-controls="navbarCollapse"
aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarCollapse">
<ul class="navbar-nav">
<!--TODO(andyk): Add class="active" attribute to li some how.-->
<li class="nav-item"><a href="index.html" class="nav-link">Overview</a></li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarQuickStart" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Programming Guides</a>
<div class="dropdown-menu" aria-labelledby="navbarQuickStart">
<a class="dropdown-item" href="quick-start.html">Quick Start</a>
<a class="dropdown-item" href="rdd-programming-guide.html">RDDs, Accumulators, Broadcasts Vars</a>
<a class="dropdown-item" href="sql-programming-guide.html">SQL, DataFrames, and Datasets</a>
<a class="dropdown-item" href="structured-streaming-programming-guide.html">Structured Streaming</a>
<a class="dropdown-item" href="streaming-programming-guide.html">Spark Streaming (DStreams)</a>
<a class="dropdown-item" href="ml-guide.html">MLlib (Machine Learning)</a>
<a class="dropdown-item" href="graphx-programming-guide.html">GraphX (Graph Processing)</a>
<a class="dropdown-item" href="sparkr.html">SparkR (R on Spark)</a>
<a class="dropdown-item" href="api/python/getting_started/index.html">PySpark (Python on Spark)</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarAPIDocs" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">API Docs</a>
<div class="dropdown-menu" aria-labelledby="navbarAPIDocs">
<a class="dropdown-item" href="api/scala/org/apache/spark/index.html">Scala</a>
<a class="dropdown-item" href="api/java/index.html">Java</a>
<a class="dropdown-item" href="api/python/index.html">Python</a>
<a class="dropdown-item" href="api/R/index.html">R</a>
<a class="dropdown-item" href="api/sql/index.html">SQL, Built-in Functions</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarDeploying" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Deploying</a>
<div class="dropdown-menu" aria-labelledby="navbarDeploying">
<a class="dropdown-item" href="cluster-overview.html">Overview</a>
<a class="dropdown-item" href="submitting-applications.html">Submitting Applications</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="spark-standalone.html">Spark Standalone</a>
<a class="dropdown-item" href="running-on-mesos.html">Mesos</a>
<a class="dropdown-item" href="running-on-yarn.html">YARN</a>
<a class="dropdown-item" href="running-on-kubernetes.html">Kubernetes</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarMore" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">More</a>
<div class="dropdown-menu" aria-labelledby="navbarMore">
<a class="dropdown-item" href="configuration.html">Configuration</a>
<a class="dropdown-item" href="monitoring.html">Monitoring</a>
<a class="dropdown-item" href="tuning.html">Tuning Guide</a>
<a class="dropdown-item" href="job-scheduling.html">Job Scheduling</a>
<a class="dropdown-item" href="security.html">Security</a>
<a class="dropdown-item" href="hardware-provisioning.html">Hardware Provisioning</a>
<a class="dropdown-item" href="migration-guide.html">Migration Guide</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="building-spark.html">Building Spark</a>
<a class="dropdown-item" href="https://spark.apache.org/contributing.html">Contributing to Spark</a>
<a class="dropdown-item" href="https://spark.apache.org/third-party-projects.html">Third Party Projects</a>
</div>
</li>
<li class="nav-item">
<input type="text" id="docsearch-input" placeholder="Search the docs…">
</li>
</ul>
<!--<span class="navbar-text navbar-right"><span class="version-text">v3.4.3</span></span>-->
</div>
</div>
</nav>
<div class="container-wrapper">
<div class="content mr-3" id="content">
<h1 class="title">Migration Guide: SparkR (R on Spark)</h1>
<ul id="markdown-toc">
<li><a href="#upgrading-from-sparkr-31-to-32" id="markdown-toc-upgrading-from-sparkr-31-to-32">Upgrading from SparkR 3.1 to 3.2</a></li>
<li><a href="#upgrading-from-sparkr-24-to-30" id="markdown-toc-upgrading-from-sparkr-24-to-30">Upgrading from SparkR 2.4 to 3.0</a></li>
<li><a href="#upgrading-from-sparkr-23-to-24" id="markdown-toc-upgrading-from-sparkr-23-to-24">Upgrading from SparkR 2.3 to 2.4</a></li>
<li><a href="#upgrading-from-sparkr-23-to-231-and-above" id="markdown-toc-upgrading-from-sparkr-23-to-231-and-above">Upgrading from SparkR 2.3 to 2.3.1 and above</a></li>
<li><a href="#upgrading-from-sparkr-22-to-23" id="markdown-toc-upgrading-from-sparkr-22-to-23">Upgrading from SparkR 2.2 to 2.3</a></li>
<li><a href="#upgrading-from-sparkr-21-to-22" id="markdown-toc-upgrading-from-sparkr-21-to-22">Upgrading from SparkR 2.1 to 2.2</a></li>
<li><a href="#upgrading-from-sparkr-20-to-31" id="markdown-toc-upgrading-from-sparkr-20-to-31">Upgrading from SparkR 2.0 to 3.1</a></li>
<li><a href="#upgrading-from-sparkr-16-to-20" id="markdown-toc-upgrading-from-sparkr-16-to-20">Upgrading from SparkR 1.6 to 2.0</a></li>
<li><a href="#upgrading-from-sparkr-15-to-16" id="markdown-toc-upgrading-from-sparkr-15-to-16">Upgrading from SparkR 1.5 to 1.6</a></li>
</ul>
<p>Note that this migration guide describes only the items specific to SparkR.
Many of the SQL migration items also apply when migrating SparkR to a higher version.
Please refer to the <a href="sql-migration-guide.html">Migration Guide: SQL, Datasets and DataFrame</a>.</p>
<h2 id="upgrading-from-sparkr-31-to-32">Upgrading from SparkR 3.1 to 3.2</h2>
<ul>
<li>Previously, SparkR automatically downloaded and installed the Spark distribution in the user&#8217;s cache directory to complete the SparkR installation when SparkR was run in a plain R shell or Rscript and the Spark distribution could not be found. Now, it asks whether the user wants to download and install it. To restore the previous behavior, set the <code class="language-plaintext highlighter-rouge">SPARKR_ASK_INSTALLATION</code> environment variable to <code class="language-plaintext highlighter-rouge">FALSE</code>, as sketched after this list.</li>
</ul>
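<p>A minimal sketch of restoring the pre-3.2 behavior; setting the variable from R with <code class="language-plaintext highlighter-rouge">Sys.setenv</code> before SparkR starts is one option, exporting it in the shell is another:</p>
<div class="language-r highlighter-rouge"><div class="highlight"><pre class="highlight"><code># Suppress the 3.2+ interactive prompt and fall back to automatic install.
Sys.setenv(SPARKR_ASK_INSTALLATION = "FALSE")

library(SparkR)
# If no Spark distribution is found, it is downloaded and installed into
# the user's cache directory without asking, as before 3.2.
sparkR.session()
</code></pre></div></div>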
<h2 id="upgrading-from-sparkr-24-to-30">Upgrading from SparkR 2.4 to 3.0</h2>
<ul>
<li>The deprecated methods <code class="language-plaintext highlighter-rouge">parquetFile</code>, <code class="language-plaintext highlighter-rouge">saveAsParquetFile</code>, <code class="language-plaintext highlighter-rouge">jsonFile</code>, and <code class="language-plaintext highlighter-rouge">jsonRDD</code> have been removed. Use <code class="language-plaintext highlighter-rouge">read.parquet</code>, <code class="language-plaintext highlighter-rouge">write.parquet</code>, and <code class="language-plaintext highlighter-rouge">read.json</code> instead, as sketched after this list.</li>
</ul>
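<p>A sketch of the replacements; the file paths are hypothetical:</p>
<div class="language-r highlighter-rouge"><div class="highlight"><pre class="highlight"><code># Removed in 3.0:
# df &lt;- parquetFile(sqlContext, "events.parquet")
# df &lt;- jsonFile(sqlContext, "events.json")

# Since 3.0, use the read/write API instead:
df &lt;- read.parquet("events.parquet")
json_df &lt;- read.json("events.json")
write.parquet(df, "events_copy.parquet")
</code></pre></div></div>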
<h2 id="upgrading-from-sparkr-23-to-24">Upgrading from SparkR 2.3 to 2.4</h2>
<ul>
<li>Previously, we did not check the validity of the size of the last layer in <code class="language-plaintext highlighter-rouge">spark.mlp</code>. For example, if the training data has only two labels, a <code class="language-plaintext highlighter-rouge">layers</code> param like <code class="language-plaintext highlighter-rouge">c(1, 3)</code> did not previously cause an error; now it does. See the sketch after this list.</li>
</ul>
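<p>A sketch of the new validation, assuming a hypothetical training SparkDataFrame <code class="language-plaintext highlighter-rouge">df</code> with four input features and two distinct labels; the column names are assumptions:</p>
<div class="language-r highlighter-rouge"><div class="highlight"><pre class="highlight"><code># Rejected since 2.4: the last layer (3) does not match the 2 labels.
# model &lt;- spark.mlp(df, label ~ features, layers = c(4, 3))

# Valid: 4 inputs, one hidden layer of 5, and an output layer of 2,
# matching the number of labels in the training data.
model &lt;- spark.mlp(df, label ~ features, layers = c(4, 5, 2))
</code></pre></div></div>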
<h2 id="upgrading-from-sparkr-23-to-231-and-above">Upgrading from SparkR 2.3 to 2.3.1 and above</h2>
<ul>
<li>In SparkR 2.3.0 and earlier, the <code class="language-plaintext highlighter-rouge">start</code> parameter of the <code class="language-plaintext highlighter-rouge">substr</code> method was wrongly subtracted by one and treated as 0-based. This could lead to inconsistent substring results and also did not match the behaviour of <code class="language-plaintext highlighter-rouge">substr</code> in R. In version 2.3.1 and later, this has been fixed so that the <code class="language-plaintext highlighter-rouge">start</code> parameter of the <code class="language-plaintext highlighter-rouge">substr</code> method is 1-based. As an example, <code class="language-plaintext highlighter-rouge">substr(lit('abcdef'), 2, 4)</code> results in <code class="language-plaintext highlighter-rouge">abc</code> in SparkR 2.3.0 and in <code class="language-plaintext highlighter-rouge">bcd</code> in SparkR 2.3.1, as reproduced after this list.</li>
</ul>
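<p>A small reproduction sketch:</p>
<div class="language-r highlighter-rouge"><div class="highlight"><pre class="highlight"><code>df &lt;- createDataFrame(data.frame(s = "abcdef"))

# SparkR 2.3.0 and earlier (0-based, wrong): returned "abc".
# SparkR 2.3.1 and later (1-based, matching R's substr): returns "bcd".
head(select(df, substr(df$s, 2, 4)))
</code></pre></div></div>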
<h2 id="upgrading-from-sparkr-22-to-23">Upgrading from SparkR 2.2 to 2.3</h2>
<ul>
<li>The <code class="language-plaintext highlighter-rouge">stringsAsFactors</code> parameter was previously ignored with <code class="language-plaintext highlighter-rouge">collect</code>, for example, in <code class="language-plaintext highlighter-rouge">collect(createDataFrame(iris), stringsAsFactors = TRUE))</code>. It has been corrected.</li>
<li>For <code class="language-plaintext highlighter-rouge">summary</code>, option for statistics to compute has been added. Its output is changed from that from <code class="language-plaintext highlighter-rouge">describe</code>.</li>
<li>A warning can be raised if versions of SparkR package and the Spark JVM do not match.</li>
</ul>
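<p>A sketch of the corrected <code class="language-plaintext highlighter-rouge">collect</code> behavior and the new <code class="language-plaintext highlighter-rouge">summary</code> statistics option:</p>
<div class="language-r highlighter-rouge"><div class="highlight"><pre class="highlight"><code>df &lt;- createDataFrame(iris)

# stringsAsFactors is now honored; Species comes back as a factor.
local_df &lt;- collect(df, stringsAsFactors = TRUE)
class(local_df$Species)  # "factor"

# summary now accepts the statistics to compute, e.g. only min and max.
head(summary(df, "min", "max"))
</code></pre></div></div>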
<h2 id="upgrading-from-sparkr-21-to-22">Upgrading from SparkR 2.1 to 2.2</h2>
<ul>
<li>A <code class="language-plaintext highlighter-rouge">numPartitions</code> parameter has been added to <code class="language-plaintext highlighter-rouge">createDataFrame</code> and <code class="language-plaintext highlighter-rouge">as.DataFrame</code>. When splitting the data, the partition position calculation has been made to match the one in Scala. An example follows this list.</li>
<li>The method <code class="language-plaintext highlighter-rouge">createExternalTable</code> has been deprecated, to be replaced by <code class="language-plaintext highlighter-rouge">createTable</code>. Either method can be called to create an external or a managed table. Additional catalog methods have also been added.</li>
<li>By default, <code class="language-plaintext highlighter-rouge">derby.log</code> is now saved to <code class="language-plaintext highlighter-rouge">tempdir()</code>. This file is created when the SparkSession is instantiated with <code class="language-plaintext highlighter-rouge">enableHiveSupport</code> set to <code class="language-plaintext highlighter-rouge">TRUE</code>.</li>
<li><code class="language-plaintext highlighter-rouge">spark.lda</code> was not setting the optimizer correctly. This has been corrected.</li>
<li>Several model summary outputs have been updated to report <code class="language-plaintext highlighter-rouge">coefficients</code> as a <code class="language-plaintext highlighter-rouge">matrix</code>. This includes <code class="language-plaintext highlighter-rouge">spark.logit</code>, <code class="language-plaintext highlighter-rouge">spark.kmeans</code>, and <code class="language-plaintext highlighter-rouge">spark.glm</code>. The model summary output for <code class="language-plaintext highlighter-rouge">spark.gaussianMixture</code> now includes the log-likelihood as <code class="language-plaintext highlighter-rouge">loglik</code>.</li>
</ul>
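<p>A sketch of the new <code class="language-plaintext highlighter-rouge">numPartitions</code> parameter and of <code class="language-plaintext highlighter-rouge">createTable</code>; the table name and path are hypothetical:</p>
<div class="language-r highlighter-rouge"><div class="highlight"><pre class="highlight"><code># Control how the local data is split when it is parallelized.
df &lt;- createDataFrame(iris, numPartitions = 4)

# createTable replaces the deprecated createExternalTable; with a path it
# creates an external table, without one a managed table.
createTable("people", path = "/tmp/people.parquet", source = "parquet")
</code></pre></div></div>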
<h2 id="upgrading-from-sparkr-20-to-31">Upgrading from SparkR 2.0 to 3.1</h2>
<ul>
<li><code class="language-plaintext highlighter-rouge">join</code> no longer performs Cartesian Product by default, use <code class="language-plaintext highlighter-rouge">crossJoin</code> instead.</li>
</ul>
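<p>A minimal sketch of requesting the Cartesian product explicitly:</p>
<div class="language-r highlighter-rouge"><div class="highlight"><pre class="highlight"><code>df1 &lt;- createDataFrame(data.frame(x = 1:2))
df2 &lt;- createDataFrame(data.frame(y = c("a", "b")))

# join(df1, df2) no longer falls back to a Cartesian product;
# the product must now be requested explicitly:
cartesian &lt;- crossJoin(df1, df2)
count(cartesian)  # 4
</code></pre></div></div>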
<h2 id="upgrading-from-sparkr-16-to-20">Upgrading from SparkR 1.6 to 2.0</h2>
<ul>
<li>The method <code class="language-plaintext highlighter-rouge">table</code> has been removed and replaced by <code class="language-plaintext highlighter-rouge">tableToDF</code>.</li>
<li>The class <code class="language-plaintext highlighter-rouge">DataFrame</code> has been renamed to <code class="language-plaintext highlighter-rouge">SparkDataFrame</code> to avoid name conflicts.</li>
<li>Spark&#8217;s <code class="language-plaintext highlighter-rouge">SQLContext</code> and <code class="language-plaintext highlighter-rouge">HiveContext</code> have been deprecated, to be replaced by <code class="language-plaintext highlighter-rouge">SparkSession</code>. Instead of <code class="language-plaintext highlighter-rouge">sparkR.init()</code>, call <code class="language-plaintext highlighter-rouge">sparkR.session()</code> to instantiate a SparkSession. Once that is done, the currently active SparkSession is used for SparkDataFrame operations. A sketch of the new entry point follows this list.</li>
<li>The parameter <code class="language-plaintext highlighter-rouge">sparkExecutorEnv</code> is not supported by <code class="language-plaintext highlighter-rouge">sparkR.session</code>. To set the environment for the executors, set Spark config properties with the prefix &#8220;spark.executorEnv.VAR_NAME&#8221;, for example, &#8220;spark.executorEnv.PATH&#8221;.</li>
<li>The <code class="language-plaintext highlighter-rouge">sqlContext</code> parameter is no longer required for these functions: <code class="language-plaintext highlighter-rouge">createDataFrame</code>, <code class="language-plaintext highlighter-rouge">as.DataFrame</code>, <code class="language-plaintext highlighter-rouge">read.json</code>, <code class="language-plaintext highlighter-rouge">jsonFile</code>, <code class="language-plaintext highlighter-rouge">read.parquet</code>, <code class="language-plaintext highlighter-rouge">parquetFile</code>, <code class="language-plaintext highlighter-rouge">read.text</code>, <code class="language-plaintext highlighter-rouge">sql</code>, <code class="language-plaintext highlighter-rouge">tables</code>, <code class="language-plaintext highlighter-rouge">tableNames</code>, <code class="language-plaintext highlighter-rouge">cacheTable</code>, <code class="language-plaintext highlighter-rouge">uncacheTable</code>, <code class="language-plaintext highlighter-rouge">clearCache</code>, <code class="language-plaintext highlighter-rouge">dropTempTable</code>, <code class="language-plaintext highlighter-rouge">read.df</code>, <code class="language-plaintext highlighter-rouge">loadDF</code>, <code class="language-plaintext highlighter-rouge">createExternalTable</code>.</li>
<li>The method <code class="language-plaintext highlighter-rouge">registerTempTable</code> has been deprecated to be replaced by <code class="language-plaintext highlighter-rouge">createOrReplaceTempView</code>.</li>
<li>The method <code class="language-plaintext highlighter-rouge">dropTempTable</code> has been deprecated to be replaced by <code class="language-plaintext highlighter-rouge">dropTempView</code>.</li>
<li>The <code class="language-plaintext highlighter-rouge">sc</code> SparkContext parameter is no longer required for these functions: <code class="language-plaintext highlighter-rouge">setJobGroup</code>, <code class="language-plaintext highlighter-rouge">clearJobGroup</code>, <code class="language-plaintext highlighter-rouge">cancelJobGroup</code></li>
</ul>
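<p>A sketch of the 2.0 entry point, using the built-in <code class="language-plaintext highlighter-rouge">iris</code> dataset as a stand-in:</p>
<div class="language-r highlighter-rouge"><div class="highlight"><pre class="highlight"><code># Before 2.0:
# sc &lt;- sparkR.init()
# sqlContext &lt;- sparkRSQL.init(sc)
# df &lt;- createDataFrame(sqlContext, iris)

# Since 2.0, a single entry point and no sqlContext argument:
sparkR.session()
df &lt;- createDataFrame(iris)
createOrReplaceTempView(df, "iris_view")  # replaces registerTempTable
head(sql("SELECT * FROM iris_view LIMIT 3"))
</code></pre></div></div>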
<h2 id="upgrading-from-sparkr-15-to-16">Upgrading from SparkR 1.5 to 1.6</h2>
<ul>
<li>Before Spark 1.6.0, the default mode for writes was <code class="language-plaintext highlighter-rouge">append</code>. It was changed in Spark 1.6.0 to <code class="language-plaintext highlighter-rouge">error</code> to match the Scala API, as sketched after this list.</li>
<li>SparkSQL converts <code class="language-plaintext highlighter-rouge">NA</code> in R to <code class="language-plaintext highlighter-rouge">null</code> and vice versa.</li>
<li>Since 1.6.1, the <code class="language-plaintext highlighter-rouge">withColumn</code> method in SparkR supports adding a new column to a DataFrame or replacing existing columns of the same name.</li>
</ul>
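<p>A sketch of opting back into appending, assuming an existing SparkDataFrame <code class="language-plaintext highlighter-rouge">df</code> and a hypothetical output path:</p>
<div class="language-r highlighter-rouge"><div class="highlight"><pre class="highlight"><code># Since 1.6.0 the default save mode is "error", which fails if the path
# already exists; request the old appending behavior explicitly:
write.df(df, path = "/tmp/out.parquet", source = "parquet", mode = "append")
</code></pre></div></div>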
</div>
<!-- /container -->
</div>
<script src="js/vendor/jquery-3.5.1.min.js"></script>
<script src="js/vendor/bootstrap.bundle.min.js"></script>
<script src="js/vendor/anchor.min.js"></script>
<script src="js/main.js"></script>
<script type="text/javascript" src="https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.js"></script>
<script type="text/javascript">
// DocSearch is entirely free and automated. DocSearch is built in two parts:
// 1. a crawler which we run on our own infrastructure every 24 hours. It follows every link
// in your website and extracts content from every page it traverses. It then pushes this
// content to an Algolia index.
// 2. a JavaScript snippet to be inserted in your website that will bind this Algolia index
// to your search input and display its results in a dropdown UI. If you want to find more
// details on how DocSearch works, check the DocSearch docs.
docsearch({
apiKey: 'd62f962a82bc9abb53471cb7b89da35e',
appId: 'RAI69RXRSK',
indexName: 'apache_spark',
inputSelector: '#docsearch-input',
enhancedSearchInput: true,
algoliaOptions: {
'facetFilters': ["version:3.4.3"]
},
debug: false // Set debug to true if you want to inspect the dropdown
});
</script>
<!-- MathJax Section -->
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
TeX: { equationNumbers: { autoNumber: "AMS" } }
});
</script>
<script>
// Note that we load MathJax this way to work with local file (file://), HTTP and HTTPS.
// We could use "//cdn.mathjax...", but that won't support "file://".
(function(d, script) {
script = d.createElement('script');
script.type = 'text/javascript';
script.async = true;
script.onload = function(){
MathJax.Hub.Config({
tex2jax: {
inlineMath: [ ["$", "$"], ["\\\\(","\\\\)"] ],
displayMath: [ ["$$","$$"], ["\\[", "\\]"] ],
processEscapes: true,
skipTags: ['script', 'noscript', 'style', 'textarea', 'pre']
}
});
};
script.src = ('https:' == document.location.protocol ? 'https://' : 'http://') +
'cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js' +
'?config=TeX-AMS-MML_HTMLorMML';
d.getElementsByTagName('head')[0].appendChild(script);
}(document));
</script>
</body>
</html>