<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<title>PySpark Overview &#8212; PySpark 3.5.4 documentation</title>
<link href="_static/styles/theme.css?digest=1999514e3f237ded88cf" rel="stylesheet">
<link href="_static/styles/pydata-sphinx-theme.css?digest=1999514e3f237ded88cf" rel="stylesheet">
<link rel="stylesheet"
href="_static/vendor/fontawesome/5.13.0/css/all.min.css">
<link rel="preload" as="font" type="font/woff2" crossorigin
href="_static/vendor/fontawesome/5.13.0/webfonts/fa-solid-900.woff2">
<link rel="preload" as="font" type="font/woff2" crossorigin
href="_static/vendor/fontawesome/5.13.0/webfonts/fa-brands-400.woff2">
<link rel="stylesheet" href="_static/styles/pydata-sphinx-theme.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<link rel="stylesheet" type="text/css" href="_static/copybutton.css" />
<link rel="stylesheet" type="text/css" href="_static/css/pyspark.css" />
<link rel="preload" as="script" href="_static/scripts/pydata-sphinx-theme.js?digest=1999514e3f237ded88cf">
<script id="documentation_options" data-url_root="./" src="_static/documentation_options.js"></script>
<script src="_static/jquery.js"></script>
<script src="_static/underscore.js"></script>
<script src="_static/doctools.js"></script>
<script src="_static/language_data.js"></script>
<script src="_static/clipboard.min.js"></script>
<script src="_static/copybutton.js"></script>
<script crossorigin="anonymous" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js"></script>
<script async="async" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
<script type="text/x-mathjax-config">MathJax.Hub.Config({"tex2jax": {"inlineMath": [["$", "$"], ["\\(", "\\)"]], "processEscapes": true, "ignoreClass": "document", "processClass": "math|output_area"}})</script>
<link rel="canonical" href="https://spark.apache.org/docs/latest/api/python/index.html" />
<link rel="search" title="Search" href="search.html" />
<link rel="next" title="Getting Started" href="getting_started/index.html" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<meta name="docsearch:language" content="None">
<!-- Google Analytics -->
</head>
<body>
<nav class="navbar navbar-light navbar-expand-lg bg-light fixed-top bd-navbar" id="navbar-main"><div class="container-xl">
<div id="navbar-start">
<a class="navbar-brand" href="#">
<img src="_static/spark-logo-reverse.png" class="logo" alt="logo">
</a>
</div>
<button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbar-collapsible" aria-controls="navbar-collapsible" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div id="navbar-collapsible" class="col-lg-9 collapse navbar-collapse">
<div id="navbar-center" class="mr-auto">
<div class="navbar-center-item">
<ul id="navbar-main-elements" class="navbar-nav">
<li class="toctree-l1 current active nav-item">
<a class="current reference internal nav-link" href="#">
Overview
</a>
</li>
<li class="toctree-l1 nav-item">
<a class="reference internal nav-link" href="getting_started/index.html">
Getting Started
</a>
</li>
<li class="toctree-l1 nav-item">
<a class="reference internal nav-link" href="user_guide/index.html">
User Guides
</a>
</li>
<li class="toctree-l1 nav-item">
<a class="reference internal nav-link" href="reference/index.html">
API Reference
</a>
</li>
<li class="toctree-l1 nav-item">
<a class="reference internal nav-link" href="development/index.html">
Development
</a>
</li>
<li class="toctree-l1 nav-item">
<a class="reference internal nav-link" href="migration_guide/index.html">
Migration Guides
</a>
</li>
</ul>
</div>
</div>
<div id="navbar-end">
<div class="navbar-end-item">
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<div id="version-button" class="dropdown">
<button type="button" class="btn btn-secondary btn-sm navbar-btn dropdown-toggle" id="version_switcher_button" data-toggle="dropdown">
3.5.4
<span class="caret"></span>
</button>
<div id="version_switcher" class="dropdown-menu list-group-flush py-0" aria-labelledby="version_switcher_button">
<!-- dropdown will be populated by javascript on page load -->
</div>
</div>
<script type="text/javascript">
// Function to construct the target URL from the JSON components
function buildURL(entry) {
var template = "https://spark.apache.org/docs/{version}/api/python/index.html"; // supplied by jinja
template = template.replace("{version}", entry.version);
return template;
}
// Function to check if corresponding page path exists in other version of docs
// and, if so, go there instead of the homepage of the other docs version
function checkPageExistsAndRedirect(event) {
const currentFilePath = "index.html",
otherDocsHomepage = event.target.getAttribute("href");
let tryUrl = `${otherDocsHomepage}${currentFilePath}`;
$.ajax({
type: 'HEAD',
url: tryUrl,
// if the page exists, go there
success: function() {
location.href = tryUrl;
}
}).fail(function() {
location.href = otherDocsHomepage;
});
return false;
}
// Function to populate the version switcher
(function () {
// get JSON config
$.getJSON("https://spark.apache.org/static/versions.json", function(data, textStatus, jqXHR) {
// create the nodes first (before AJAX calls) to ensure the order is
// correct (for now, links will go to doc version homepage)
$.each(data, function(index, entry) {
// if no custom name specified (e.g., "latest"), use version string
if (!("name" in entry)) {
entry.name = entry.version;
}
// construct the appropriate URL, and add it to the dropdown
entry.url = buildURL(entry);
const node = document.createElement("a");
node.setAttribute("class", "list-group-item list-group-item-action py-1");
node.setAttribute("href", `${entry.url}`);
node.textContent = `${entry.name}`;
node.onclick = checkPageExistsAndRedirect;
$("#version_switcher").append(node);
});
});
})();
</script>
</div>
</div>
</div>
</div>
</nav>
<div class="container-xl">
<div class="row">
<!-- Only show if we have sidebars configured, else just a small margin -->
<div class="col-12 col-md-3 bd-sidebar">
<div class="sidebar-start-items"><form class="bd-search d-flex align-items-center" action="search.html" method="get">
<i class="icon fas fa-search"></i>
<input type="search" class="form-control" name="q" id="search-input" placeholder="Search the docs ..." aria-label="Search the docs ..." autocomplete="off" >
</form><nav class="bd-links" id="bd-docs-nav" aria-label="Main navigation">
<div class="bd-toc-item active">
</div>
</nav>
</div>
<div class="sidebar-end-items">
</div>
</div>
<div class="d-none d-xl-block col-xl-2 bd-toc">
<div class="toc-item">
<nav id="bd-toc-nav">
</nav>
</div>
<div class="toc-item">
</div>
</div>
<main class="col-12 col-md-9 col-xl-7 py-md-5 pl-md-5 pr-md-4 bd-content" role="main">
<div>
<div class="section" id="pyspark-overview">
<h1>PySpark Overview</h1>
<p><strong>Date</strong>: Dec 17, 2024 <strong>Version</strong>: 3.5.4</p>
<p><strong>Useful links</strong>:
<a class="reference external" href="https://mybinder.org/v2/gh/apache/spark/a6f220d9517?filepath=python%2Fdocs%2Fsource%2Fgetting_started%2Fquickstart_df.ipynb">Live Notebook</a> | <a class="reference external" href="https://github.com/apache/spark">GitHub</a> | <a class="reference external" href="https://issues.apache.org/jira/projects/SPARK/issues">Issues</a> | <a class="reference external" href="https://github.com/apache/spark/tree/a6f220d9517/examples/src/main/python">Examples</a> | <a class="reference external" href="https://spark.apache.org/community.html">Community</a></p>
<p>PySpark is the Python API for Apache Spark. It enables you to perform real-time,
large-scale data processing in a distributed environment using Python. It also provides a PySpark
shell for interactively analyzing your data.</p>
<p>PySpark combines Python’s learnability and ease of use with the power of Apache Spark
to enable processing and analysis of data at any size for everyone familiar with Python.</p>
<p>PySpark supports all of Spark’s features, such as Spark SQL,
DataFrames, Structured Streaming, Machine Learning (MLlib) and Spark Core.</p>
<table class="colwidths-given borderless spec-table table">
<colgroup>
<col style="width: 10%" />
<col style="width: 20%" />
<col style="width: 20%" />
<col style="width: 20%" />
<col style="width: 20%" />
<col style="width: 10%" />
</colgroup>
<tbody>
<tr class="row-odd"><td></td>
<td><a class="reference external image-reference" href="reference/pyspark.sql/index.html"><img alt="Spark SQL" src="_images/pyspark-spark_sql_and_dataframes.png" style="width: 100%;" /></a>
</td>
<td><a class="reference external image-reference" href="reference/pyspark.pandas/index.html"><img alt="Pandas API on Spark" src="_images/pyspark-pandas_api_on_spark.png" style="width: 100%;" /></a>
</td>
<td><a class="reference external image-reference" href="reference/pyspark.ss/index.html"><img alt="Streaming" src="_images/pyspark-structured_streaming.png" style="width: 100%;" /></a>
</td>
<td><a class="reference external image-reference" href="reference/pyspark.ml.html"><img alt="Machine Learning" src="_images/pyspark-machine_learning.png" style="width: 100%;" /></a>
</td>
<td></td>
</tr>
</tbody>
</table>
<table class="colwidths-given borderless spec-table table">
<colgroup>
<col style="width: 10%" />
<col style="width: 80%" />
<col style="width: 10%" />
</colgroup>
<tbody>
<tr class="row-odd"><td></td>
<td><a class="reference external image-reference" href="reference/pyspark.html"><img alt="Spark Core and RDDs" src="_images/pyspark-spark_core_and_rdds.png" style="width: 100%;" /></a>
</td>
<td></td>
</tr>
</tbody>
</table>
<p id="index-page-spark-sql-and-dataframes"><strong>Spark SQL and DataFrames</strong></p>
<p>Spark SQL is Apache Spark’s module for working with structured data.
It allows you to seamlessly mix SQL queries with Spark programs.
With PySpark DataFrames you can efficiently read, write, transform,
and analyze data using Python and SQL.
Whether you use Python or SQL, the same underlying execution
engine is used, so you always get the full power of Spark.</p>
<ul class="simple">
<li><p><a class="reference internal" href="getting_started/quickstart_df.html"><span class="std std-ref">Quickstart: DataFrame</span></a></p></li>
<li><p><a class="reference external" href="https://mybinder.org/v2/gh/apache/spark/a6f220d9517?filepath=python%2Fdocs%2Fsource%2Fgetting_started%2Fquickstart_df.ipynb">Live Notebook: DataFrame</a></p></li>
<li><p><a class="reference internal" href="reference/pyspark.sql/index.html"><span class="std std-ref">Spark SQL API Reference</span></a></p></li>
</ul>
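<p>For example, here is a minimal sketch (assuming a working PySpark installation; the app name, column names, and rows are illustrative) of the same query expressed through the DataFrame API and as SQL over a temporary view:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre>
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("overview-example").getOrCreate()

df = spark.createDataFrame(
    [("Alice", 34), ("Bob", 45), ("Cathy", 29)],
    schema=["name", "age"],
)

# DataFrame API
df.filter(df.age &gt; 30).select("name").show()

# The equivalent SQL, run by the same engine
df.createOrReplaceTempView("people")
spark.sql("SELECT name FROM people WHERE age &gt; 30").show()
</pre></div></div>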
<p><strong>Pandas API on Spark</strong></p>
<p>Pandas API on Spark lets you scale your pandas workload to any size
by distributing it across multiple nodes. If you are already familiar
with pandas and want to leverage Spark for big data, pandas API on Spark makes
you immediately productive and lets you migrate your applications without modifying the code.
You can keep a single codebase that works both with pandas (tests, smaller datasets)
and with Spark (production, distributed datasets), and you can switch between the
pandas API and the pandas API on Spark easily and without overhead.</p>
<p>Pandas API on Spark aims to make the transition from pandas to Spark easy, but
if you are new to Spark or are deciding which API to use, we recommend using PySpark
(see <a class="reference internal" href="#index-page-spark-sql-and-dataframes"><span class="std std-ref">Spark SQL and DataFrames</span></a>).</p>
<ul class="simple">
<li><p><a class="reference internal" href="getting_started/quickstart_ps.html"><span class="std std-ref">Quickstart: Pandas API on Spark</span></a></p></li>
<li><p><a class="reference external" href="https://mybinder.org/v2/gh/apache/spark/a6f220d9517?filepath=python%2Fdocs%2Fsource%2Fgetting_started%2Fquickstart_ps.ipynb">Live Notebook: pandas API on Spark</a></p></li>
<li><p><a class="reference internal" href="reference/pyspark.pandas/index.html"><span class="std std-ref">Pandas API on Spark Reference</span></a></p></li>
</ul>
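<p>A minimal sketch of the pandas-style syntax (the data here is illustrative; pandas API on Spark creates a SparkSession for you if none exists):</p>
<div class="highlight-python notranslate"><div class="highlight"><pre>
import pyspark.pandas as ps

# pandas-style construction and operations, executed by Spark
psdf = ps.DataFrame({"x": [1, 2, 3], "y": [10.0, 20.0, 30.0]})
print(psdf.describe())
print(psdf[psdf["x"] &gt; 1].mean())

# Collect to a true pandas DataFrame on the driver when needed
pdf = psdf.to_pandas()
</pre></div></div>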
<p id="index-page-structured-streaming"><strong>Structured Streaming</strong></p>
<p>Structured Streaming is a scalable and fault-tolerant stream processing engine built on the Spark SQL engine.
You can express your streaming computation the same way you would express a batch computation on static data.
The Spark SQL engine will take care of running it incrementally and continuously and updating the final result
as streaming data continues to arrive.</p>
<ul class="simple">
<li><p><a class="reference external" href="https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html">Structured Streaming Programming Guide</a></p></li>
<li><p><a class="reference internal" href="reference/pyspark.ss/index.html"><span class="std std-ref">Structured Streaming API Reference</span></a></p></li>
</ul>
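<p>A minimal sketch using the built-in <code>rate</code> source (which generates test rows) and the <code>console</code> sink; both exist for experimentation, and the rates and timeout here are illustrative:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre>
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("streaming-example").getOrCreate()

# An unbounded DataFrame: one row per tick, with `timestamp` and `value`
stream = spark.readStream.format("rate").option("rowsPerSecond", 5).load()

# Expressed as if it were a batch aggregation over static data
counts = stream.groupBy().count()

# Spark runs the query incrementally as new rows arrive
query = (counts.writeStream
    .outputMode("complete")
    .format("console")
    .start())
query.awaitTermination(10)  # let it run for ~10 seconds
query.stop()
</pre></div></div>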
<p><strong>Machine Learning (MLlib)</strong></p>
<p>Built on top of Spark, MLlib is a scalable machine learning library that provides
a uniform set of high-level APIs that help users create and tune practical machine
learning pipelines.</p>
<ul class="simple">
<li><p><a class="reference external" href="https://spark.apache.org/docs/latest/ml-guide.html">Machine Learning Library (MLlib) Programming Guide</a></p></li>
<li><p><a class="reference internal" href="reference/pyspark.ml.html"><span class="std std-ref">Machine Learning (MLlib) API Reference</span></a></p></li>
</ul>
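<p>A minimal sketch of such a pipeline (the classic text-classification pattern; the training rows are illustrative):</p>
<div class="highlight-python notranslate"><div class="highlight"><pre>
from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.feature import HashingTF, Tokenizer
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("mllib-example").getOrCreate()

training = spark.createDataFrame(
    [(0, "a b c d e spark", 1.0),
     (1, "b d", 0.0),
     (2, "spark f g h", 1.0),
     (3, "hadoop mapreduce", 0.0)],
    ["id", "text", "label"],
)

# Chain feature extraction and a classifier into a single pipeline
tokenizer = Tokenizer(inputCol="text", outputCol="words")
hashingTF = HashingTF(inputCol="words", outputCol="features")
lr = LogisticRegression(maxIter=10)
pipeline = Pipeline(stages=[tokenizer, hashingTF, lr])

# Fit the whole pipeline at once, then use it for prediction
model = pipeline.fit(training)
model.transform(training).select("id", "prediction").show()
</pre></div></div>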
<p><strong>Spark Core and RDDs</strong></p>
<p>Spark Core is the underlying general execution engine for the Spark platform, on which all
other functionality is built. It provides RDDs (Resilient Distributed Datasets)
and in-memory computing capabilities.</p>
<p>Note that the RDD API is a low-level API that can be difficult to use and that does not benefit
from Spark’s automatic query optimization.
We recommend using DataFrames (see <a class="reference internal" href="#index-page-spark-sql-and-dataframes"><span class="std std-ref">Spark SQL and DataFrames</span></a> above)
instead of RDDs, as they let you express what you want more easily and let Spark automatically
construct the most efficient query for you.</p>
<ul class="simple">
<li><p><a class="reference internal" href="reference/pyspark.html"><span class="std std-ref">Spark Core API Reference</span></a></p></li>
</ul>
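<p>A minimal sketch of the RDD API, reached through the SparkContext (the numbers are illustrative):</p>
<div class="highlight-python notranslate"><div class="highlight"><pre>
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("rdd-example").getOrCreate()
sc = spark.sparkContext

# Explicit, low-level transformations and actions
rdd = sc.parallelize(range(10))
squares = rdd.map(lambda x: x * x)
print(squares.sum())    # 285
print(squares.take(3))  # [0, 1, 4]
</pre></div></div>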
<p><strong>Spark Streaming (Legacy)</strong></p>
<p>Spark Streaming is an extension of the core Spark API that enables scalable,
high-throughput, fault-tolerant stream processing of live data streams.</p>
<p>Note that Spark Streaming is the previous generation of Spark’s streaming engine.
It is a legacy project and is no longer being updated.
Spark has a newer and easier-to-use streaming engine called
<a class="reference internal" href="#index-page-structured-streaming"><span class="std std-ref">Structured Streaming</span></a>, which you
should use for your streaming applications and pipelines.</p>
<ul class="simple">
<li><p><a class="reference external" href="https://spark.apache.org/docs/latest/streaming-programming-guide.html">Spark Streaming Programming Guide (Legacy)</a></p></li>
<li><p><a class="reference internal" href="reference/pyspark.streaming.html"><span class="std std-ref">Spark Streaming API Reference (Legacy)</span></a></p></li>
</ul>
<div class="toctree-wrapper compound">
</div>
</div>
</div>
</main>
<script src="_static/scripts/pydata-sphinx-theme.js?digest=1999514e3f237ded88cf"></script>
<footer class="footer mt-5 mt-md-0">
<div class="container">
<div class="footer-item">
<p class="copyright">
&copy; Copyright .<br>
</p>
</div>
<div class="footer-item">
<p class="sphinx-version">
Created using <a href="http://sphinx-doc.org/">Sphinx</a> 3.0.4.<br>
</p>
</div>
</div>
</footer>
</body>
</html>