blob: 711283b9684371d0157cf6dddffb6b2e02371af1 [file] [log] [blame]
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>
Spark Release 0.8.0 | Apache Spark
</title>
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/css/bootstrap.min.css" rel="stylesheet"
integrity="sha384-EVSTQN3/azprG1Anm3QDgpJLIm9Nao0Yz1ztcQTwFspd3yD65VohhpuuCOmLASjC" crossorigin="anonymous">
<link rel="preconnect" href="https://fonts.googleapis.com">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link href="https://fonts.googleapis.com/css2?family=DM+Sans:ital,wght@0,400;0,500;0,700;1,400;1,500;1,700&Courier+Prime:wght@400;700&display=swap" rel="stylesheet">
<link href="/css/custom.css" rel="stylesheet">
<!-- Code highlighter CSS -->
<link href="/css/pygments-default.css" rel="stylesheet">
<link rel="icon" href="/favicon.ico" type="image/x-icon">
<!-- Matomo -->
<script>
// Matomo command queue; reuse an existing queue if one is already defined.
var _paq = window._paq = window._paq || [];
// Configuration commands must be queued before 'trackPageView'.
_paq.push(["disableCookies"]);
_paq.push(['trackPageView']);
_paq.push(['enableLinkTracking']);
(function() {
  var trackerBase = "https://analytics.apache.org/";
  _paq.push(['setTrackerUrl', trackerBase + 'matomo.php']);
  _paq.push(['setSiteId', '40']);
  // Inject the Matomo loader asynchronously, ahead of the first script on the page.
  var firstScript = document.getElementsByTagName('script')[0];
  var loader = document.createElement('script');
  loader.async = true;
  loader.src = trackerBase + 'matomo.js';
  firstScript.parentNode.insertBefore(loader, firstScript);
})();
</script>
<!-- End Matomo Code -->
</head>
<body class="global">
<nav class="navbar navbar-expand-lg navbar-dark p-0 px-4" style="background: #1D6890;">
<a class="navbar-brand" href="/">
<img src="/images/spark-logo-rev.svg" alt="" width="141" height="72">
</a>
<button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarContent"
aria-controls="navbarContent" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse col-md-12 col-lg-auto pt-4" id="navbarContent">
<ul class="navbar-nav me-auto">
<li class="nav-item">
<!-- This is a release-notes page, not the Downloads page: do not mark the
     link as current (aria-current="page" + .active also appears on the
     Examples link, so two links were claiming to be the current page). -->
<a class="nav-link" href="/downloads.html">Download</a>
</li>
<li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="#" id="libraries" role="button" data-bs-toggle="dropdown"
aria-expanded="false">
Libraries
</a>
<ul class="dropdown-menu" aria-labelledby="libraries">
<li><a class="dropdown-item" href="/sql/">SQL and DataFrames</a></li>
<li><a class="dropdown-item" href="/spark-connect/">Spark Connect</a></li>
<li><a class="dropdown-item" href="/streaming/">Spark Streaming</a></li>
<li><a class="dropdown-item" href="/pandas-on-spark/">pandas on Spark</a></li>
<li><a class="dropdown-item" href="/mllib/">MLlib (machine learning)</a></li>
<li><a class="dropdown-item" href="/graphx/">GraphX (graph)</a></li>
<li>
<hr class="dropdown-divider">
</li>
<li><a class="dropdown-item" href="/third-party-projects.html">Third-Party Projects</a></li>
</ul>
</li>
<li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="#" id="documentation" role="button" data-bs-toggle="dropdown"
aria-expanded="false">
Documentation
</a>
<ul class="dropdown-menu" aria-labelledby="documentation">
<li><a class="dropdown-item" href="/docs/latest/">Latest Release</a></li>
<li><a class="dropdown-item" href="/documentation.html">Older Versions and Other Resources</a></li>
<li><a class="dropdown-item" href="/faq.html">Frequently Asked Questions</a></li>
</ul>
</li>
<li class="nav-item">
<!-- This is a release-notes page, not the Examples page: do not mark the
     link as current (aria-current="page" + .active also appears on the
     Download link, so two links were claiming to be the current page). -->
<a class="nav-link" href="/examples.html">Examples</a>
</li>
<li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="#" id="community" role="button" data-bs-toggle="dropdown"
aria-expanded="false">
Community
</a>
<ul class="dropdown-menu" aria-labelledby="community">
<li><a class="dropdown-item" href="/community.html">Mailing Lists &amp; Resources</a></li>
<li><a class="dropdown-item" href="/contributing.html">Contributing to Spark</a></li>
<li><a class="dropdown-item" href="/improvement-proposals.html">Improvement Proposals (SPIP)</a>
</li>
<li><a class="dropdown-item" href="https://issues.apache.org/jira/browse/SPARK">Issue Tracker</a>
</li>
<li><a class="dropdown-item" href="/powered-by.html">Powered By</a></li>
<li><a class="dropdown-item" href="/committers.html">Project Committers</a></li>
<li><a class="dropdown-item" href="/history.html">Project History</a></li>
</ul>
</li>
<li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="#" id="developers" role="button" data-bs-toggle="dropdown"
aria-expanded="false">
Developers
</a>
<ul class="dropdown-menu" aria-labelledby="developers">
<li><a class="dropdown-item" href="/developer-tools.html">Useful Developer Tools</a></li>
<li><a class="dropdown-item" href="/versioning-policy.html">Versioning Policy</a></li>
<li><a class="dropdown-item" href="/release-process.html">Release Process</a></li>
<li><a class="dropdown-item" href="/security.html">Security</a></li>
</ul>
</li>
</ul>
<ul class="navbar-nav ml-auto">
<li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="#" id="apacheFoundation" role="button"
data-bs-toggle="dropdown" aria-expanded="false">
Apache Software Foundation
</a>
<ul class="dropdown-menu" aria-labelledby="apacheFoundation">
<li><a class="dropdown-item" href="https://www.apache.org/">Apache Homepage</a></li>
<li><a class="dropdown-item" href="https://www.apache.org/licenses/">License</a></li>
<li><a class="dropdown-item"
href="https://www.apache.org/foundation/sponsorship.html">Sponsorship</a></li>
<li><a class="dropdown-item" href="https://www.apache.org/foundation/thanks.html">Thanks</a></li>
<li><a class="dropdown-item" href="https://www.apache.org/security/">Security</a></li>
<li><a class="dropdown-item" href="https://www.apache.org/events/current-event">Event</a></li>
</ul>
</li>
</ul>
</div>
</nav>
<div class="container">
<div class="row mt-4">
<div class="col-12 col-md-9">
<h2>Spark Release 0.8.0</h2>
<p>Apache Spark 0.8.0 is a major release that includes many new capabilities and usability improvements. It’s also our first release in the Apache incubator. It is the largest Spark release yet, with contributions from 67 developers and 24 companies.</p>
<p>You can download Spark 0.8.0 as either a <a href="http://spark-project.org/download/spark-0.8.0-incubating.tgz">source package</a> (4 MB tar.gz) or a prebuilt package for <a href="http://spark-project.org/download/spark-0.8.0-incubating-bin-hadoop1.tgz">Hadoop 1 / CDH3</a> or <a href="http://spark-project.org/download/spark-0.8.0-incubating-bin-cdh4.tgz">CDH4</a> (125 MB tar.gz). Release signatures and checksums are available at the official <a href="http://www.apache.org/dist/incubator/spark/spark-0.8.0-incubating/">Apache download site</a>.</p>
<h3 id="monitoring-ui-and-metrics">Monitoring UI and Metrics</h3>
<p>Spark now displays a variety of monitoring data in a web UI (by default at port 4040 on the driver node). A new job dashboard contains information about running, succeeded, and failed jobs, including percentile statistics covering task runtime, shuffled data, and garbage collection. The existing storage dashboard has been extended, and additional pages have been added to display total storage and task information per-executor. Finally, a new metrics library exposes internal Spark metrics through various APIs including JMX and Ganglia.</p>
<p style="text-align: center;">
<!-- alt text required; description inferred from the surrounding section
     ("Monitoring UI and Metrics") — confirm against the actual screenshot. -->
<img src="/images/0.8.0-ui-screenshot.png" alt="Screenshot of the new Spark web UI job monitoring dashboard" style="width:90%;">
</p>
<h3 id="machine-learning-library">Machine Learning Library</h3>
<p>This release introduces MLlib, a standard library of high-quality machine learning and optimization algorithms for Spark. MLlib was developed in collaboration with the <a href="http://www.mlbase.org/">UC Berkeley MLbase project</a>. The current library contains seven algorithms, including support vector machines (SVMs), logistic regression, several regularized variants of linear regression, a clustering algorithm (KMeans), and alternating least squares collaborative filtering.</p>
<h3 id="python-improvements">Python Improvements</h3>
<p>The Python API has been extended with many previously missing features. This includes support for different storage levels, sampling, and various missing RDD operators. We’ve also added support for running Spark in <a href="http://ipython.org/">IPython</a>, including the IPython Notebook, and for running PySpark on Windows.</p>
<h3 id="hadoop-yarn-support">Hadoop YARN support</h3>
<p>Spark 0.8 adds greatly improved support for running standalone Spark jobs on a YARN cluster. The YARN support is no longer experimental but now part of mainline Spark. Support for running against a secured YARN cluster has also been added.</p>
<h3 id="revamped-job-scheduler">Revamped Job Scheduler</h3>
<p>Spark’s internal job scheduler has been refactored and extended to include more sophisticated scheduling policies. In particular, a <a href="http://spark.incubator.apache.org/docs/0.8.0/job-scheduling.html#scheduling-within-an-application">fair scheduler</a> implementation now allows multiple users to share an instance of Spark, which helps users running shorter jobs to achieve good performance, even when longer-running jobs are running in parallel. Support for topology-aware scheduling has been extended, including the ability to take into account rack locality and support for multiple executors on a single machine.</p>
<h3 id="easier-deployment-and-linking">Easier Deployment and Linking</h3>
<p>User programs can now link to Spark no matter which Hadoop version they need, without having to publish a version of <code class="language-plaintext highlighter-rouge">spark-core</code> specifically for that Hadoop version. An explanation of how to link against different Hadoop versions is provided <a href="http://spark.incubator.apache.org/docs/0.8.0/scala-programming-guide.html#linking-with-spark">here</a>.</p>
<h3 id="expanded-ec2-capabilities">Expanded EC2 Capabilities</h3>
<p>Spark’s EC2 scripts now support launching in any availability zone. Support has also been added for EC2 instance types which use the newer “HVM” architecture. This includes the cluster compute (cc1/cc2) family of instance types. We’ve also added support for running newer versions of HDFS alongside Spark. Finally, we’ve added the ability to launch clusters with maintenance releases of Spark in addition to launching the newest release.</p>
<h3 id="improved-documentation">Improved Documentation</h3>
<p>This release adds documentation about cluster hardware provisioning and inter-operation with common Hadoop distributions. Docs are also included to cover the MLlib machine learning functions and new cluster monitoring features. Existing documentation has been updated to reflect changes in building and deploying Spark.</p>
<h3 id="other-improvements">Other Improvements</h3>
<ul>
<li>RDDs can now manually be dropped from memory with <code class="language-plaintext highlighter-rouge">unpersist</code>.</li>
<li>The RDD class includes the following new operations: <code class="language-plaintext highlighter-rouge">takeOrdered</code>, <code class="language-plaintext highlighter-rouge">zipPartitions</code>, <code class="language-plaintext highlighter-rouge">top</code>.</li>
<li>A <code class="language-plaintext highlighter-rouge">JobLogger</code> class has been added to produce archivable logs of a Spark workload.</li>
<li>The <code class="language-plaintext highlighter-rouge">RDD.coalesce</code> function now takes into account locality.</li>
<li>The <code class="language-plaintext highlighter-rouge">RDD.pipe</code> function has been extended to support passing environment variables to child processes.</li>
<li>Hadoop <code class="language-plaintext highlighter-rouge">save</code> functions now support an optional compression codec.</li>
<li>You can now create a binary distribution of Spark which depends only on a Java runtime for easier deployment on a cluster.</li>
<li>The examples build has been isolated from the core build, substantially reducing the potential for dependency conflicts.</li>
<li>The Spark Streaming Twitter API has been updated to use OAuth authentication instead of the deprecated username/password authentication in Spark 0.7.0.</li>
<li>Several new example jobs have been added, including PageRank implementations in Java, Scala and Python, examples for accessing HBase and Cassandra, and MLlib examples.</li>
<li>Support for running on Mesos has been improved &#8211; now you can deploy a Spark assembly JAR as part of the Mesos job, instead of having Spark pre-installed on each machine. The default Mesos version has also been updated to 0.13.</li>
<li>This release includes various optimizations to PySpark and to the job scheduler.</li>
</ul>
<h3 id="compatibility">Compatibility</h3>
<ul>
<li><strong>This release changes Spark’s package name to &#8216;org.apache.spark&#8217;</strong>, so those upgrading from Spark 0.7 will need to adjust their imports accordingly. In addition, we’ve moved the <code class="language-plaintext highlighter-rouge">RDD</code> class to the org.apache.spark.rdd package (it was previously in the top-level package). The Spark artifacts published through Maven have also changed to the new package name.</li>
<li>In the Java API, use of Scala’s <code class="language-plaintext highlighter-rouge">Option</code> class has been replaced with <code class="language-plaintext highlighter-rouge">Optional</code> from the Guava library.</li>
<li>Linking against Spark for arbitrary Hadoop versions is now possible by specifying a dependency on <code class="language-plaintext highlighter-rouge">hadoop-client</code>, instead of rebuilding <code class="language-plaintext highlighter-rouge">spark-core</code> against your version of Hadoop. See the documentation <a href="http://spark.incubator.apache.org/docs/0.8.0/scala-programming-guide.html#linking-with-spark">here</a> for details.</li>
<li>If you are building Spark, you’ll now need to run <code class="language-plaintext highlighter-rouge">sbt/sbt assembly</code> instead of <code class="language-plaintext highlighter-rouge">package</code>.</li>
</ul>
<h3 id="credits">Credits</h3>
<p>Spark 0.8.0 was the result of the largest team of contributors yet. The following developers contributed to this release:</p>
<ul>
<li>Andrew Ash &#8211; documentation, code cleanup and logging improvements</li>
<li>Mikhail Bautin &#8211; bug fix</li>
<li>Konstantin Boudnik &#8211; Maven build, bug fixes, and documentation</li>
<li>Ian Buss &#8211; sbt configuration improvement</li>
<li>Evan Chan &#8211; API improvement, bug fix, and documentation</li>
<li>Lian Cheng &#8211; bug fix</li>
<li>Tathagata Das &#8211; performance improvement in streaming receiver and streaming bug fix</li>
<li>Aaron Davidson &#8211; Python improvements, bug fix, and unit tests</li>
<li>Giovanni Delussu &#8211; coalesced RDD feature</li>
<li>Joseph E. Gonzalez &#8211; improvement to zipPartitions</li>
<li>Karen Feng &#8211; several improvements to web UI</li>
<li>Andy Feng &#8211; HDFS metrics</li>
<li>Ali Ghodsi &#8211; configuration improvements and locality-aware coalesce</li>
<li>Christoph Grothaus &#8211; bug fix</li>
<li>Thomas Graves &#8211; support for secure YARN cluster and various YARN-related improvements</li>
<li>Stephen Haberman &#8211; bug fix, documentation, and code cleanup</li>
<li>Mark Hamstra &#8211; bug fixes and Maven build</li>
<li>Benjamin Hindman &#8211; Mesos compatibility and documentation</li>
<li>Liang-Chi Hsieh &#8211; bug fixes in build and in YARN mode</li>
<li>Shane Huang &#8211; shuffle improvements, bug fix</li>
<li>Ethan Jewett &#8211; Spark/HBase example</li>
<li>Holden Karau &#8211; bug fix and EC2 improvement</li>
<li>Kody Koeniger &#8211; JDBC RDD implementation</li>
<li>Andy Konwinski &#8211; documentation</li>
<li>Jey Kottalam &#8211; PySpark optimizations, Hadoop agnostic build (lead), and bug fixes</li>
<li>Andrey Kouznetsov &#8211; Bug fix</li>
<li>S. Kumar &#8211; Spark Streaming example</li>
<li>Ryan LeCompte &#8211; topK method optimization and serialization improvements</li>
<li>Gavin Li &#8211; compression codecs and pipe support</li>
<li>Harold Lim &#8211; fair scheduler</li>
<li>Dmitriy Lyubimov &#8211; bug fix</li>
<li>Chris Mattmann &#8211; Apache mentor</li>
<li>David McCauley &#8211; JSON API improvement</li>
<li>Sean McNamara &#8211; added <code class="language-plaintext highlighter-rouge">takeOrdered</code> function, bug fixes, and a build fix</li>
<li>Mridul Muralidharan &#8211; YARN integration (lead) and scheduler improvements</li>
<li>Marc Mercer &#8211; improvements to UI json output</li>
<li>Christopher Nguyen &#8211; bug fixes</li>
<li>Erik van Oosten &#8211; example fix</li>
<li>Kay Ousterhout &#8211; fix for scheduler regression and bug fixes</li>
<li>Xinghao Pan &#8211; MLLib contributions</li>
<li>Hiral Patel &#8211; bug fix</li>
<li>James Phillpotts &#8211; updated Twitter API for Spark streaming</li>
<li>Nick Pentreath &#8211; scala pageRank example, bagel improvement, and several Java examples</li>
<li>Alexander Pivovarov &#8211; logging improvement and Maven build</li>
<li>Mike Potts &#8211; configuration improvement</li>
<li>Rohit Rai &#8211; Spark/Cassandra example</li>
<li>Imran Rashid &#8211; bug fixes and UI improvement</li>
<li>Charles Reiss &#8211; bug fixes, code cleanup, performance improvements</li>
<li>Josh Rosen &#8211; Python API improvements, Java API improvements, EC2 scripts and bug fixes</li>
<li>Henry Saputra &#8211; Apache mentor</li>
<li>Jerry Shao &#8211; bug fixes, metrics system</li>
<li>Prashant Sharma &#8211; documentation</li>
<li>Mingfei Shi &#8211; joblogger and bug fix</li>
<li>Andre Schumacher &#8211; several PySpark features</li>
<li>Ginger Smith &#8211; MLLib contribution</li>
<li>Evan Sparks &#8211; contributions to MLLib</li>
<li>Ram Sriharsha &#8211; bug fix and RDD removal feature</li>
<li>Ameet Talwalkar &#8211; MLlib contributions</li>
<li>Roman Tkalenko &#8211; code refactoring and cleanup</li>
<li>Chu Tong &#8211; Java PageRank algorithm and bug fix in bash scripts</li>
<li>Shivaram Venkataraman &#8211; bug fixes, contributions to MLLib, netty shuffle fixes, and Java API additions</li>
<li>Patrick Wendell &#8211; release manager, bug fixes, documentation, metrics system, and web UI</li>
<li>Andrew Xia &#8211; fair scheduler (lead), metrics system, and ui improvements</li>
<li>Reynold Xin &#8211; shuffle improvements, bug fixes, code refactoring, usability improvements, MLLib contributions</li>
<li>Matei Zaharia &#8211; MLLib contributions, documentation, examples, UI improvements, PySpark improvements, and bug fixes</li>
<li>Wu Zeming &#8211; bug fix in scheduler</li>
<li>Bill Zhao &#8211; log message improvement</li>
</ul>
<p>Thanks to everyone who contributed!
We’d especially like to thank Patrick Wendell for acting as the release manager for this release.</p>
<p>
<br>
<a href="/news/">Spark News Archive</a>
</p>
</div>
<div class="col-12 col-md-3">
<div class="news" style="margin-bottom: 20px;">
<h5>Latest News</h5>
<ul class="list-unstyled">
<li><a href="/news/spark-3-4-3-released.html">Spark 3.4.3 released</a>
<span class="small">(Apr 18, 2024)</span></li>
<li><a href="/news/spark-3-5-1-released.html">Spark 3.5.1 released</a>
<span class="small">(Feb 23, 2024)</span></li>
<li><a href="/news/spark-3-3-4-released.html">Spark 3.3.4 released</a>
<span class="small">(Dec 16, 2023)</span></li>
<li><a href="/news/spark-3-4-2-released.html">Spark 3.4.2 released</a>
<span class="small">(Nov 30, 2023)</span></li>
</ul>
<p class="small" style="text-align: right;"><a href="/news/index.html">Archive</a></p>
</div>
<div style="text-align:center; margin-bottom: 20px;">
<!-- The image is this link's only content, so the alt text provides the
     link's accessible name. -->
<a href="https://www.apache.org/events/current-event.html">
<img src="https://www.apache.org/events/current-event-234x60.png" alt="Current Apache Software Foundation event" style="max-width: 100%;">
</a>
</div>
<div class="hidden-xs hidden-sm">
<a href="/downloads.html" class="btn btn-cta btn-lg d-grid" style="margin-bottom: 30px;">
Download Spark
</a>
<p style="font-size: 16px; font-weight: 500; color: #555;">
Built-in Libraries:
</p>
<ul class="list-none">
<li><a href="/sql/">SQL and DataFrames</a></li>
<li><a href="/streaming/">Spark Streaming</a></li>
<li><a href="/mllib/">MLlib (machine learning)</a></li>
<li><a href="/graphx/">GraphX (graph)</a></li>
</ul>
<a href="/third-party-projects.html">Third-Party Projects</a>
</div>
</div>
</div>
<footer class="small">
<hr>
Apache Spark, Spark, Apache, the Apache feather logo, and the Apache Spark project logo are either registered
trademarks or trademarks of The Apache Software Foundation in the United States and other countries.
See guidance on use of Apache Spark <a href="/trademarks.html">trademarks</a>.
All other marks mentioned may be trademarks or registered trademarks of their respective owners.
Copyright &copy; 2018 The Apache Software Foundation, Licensed under the
<a href="https://www.apache.org/licenses/">Apache License, Version 2.0</a>.
</footer>
</div>
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/js/bootstrap.bundle.min.js"
integrity="sha384-MrcW6ZMFYlzcLA8Nl+NtUVF0sA7MsXsP1UyJoMp4YLEuNSfAP+JcXn/tWtIaxVXM"
crossorigin="anonymous"></script>
<script src="https://code.jquery.com/jquery.js"></script>
<script src="/js/lang-tabs.js"></script>
<script src="/js/downloads.js"></script>
</body>
</html>