| |
| <!DOCTYPE html> |
| <!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7"> <![endif]--> |
| <!--[if IE 7]> <html class="no-js lt-ie9 lt-ie8"> <![endif]--> |
| <!--[if IE 8]> <html class="no-js lt-ie9"> <![endif]--> |
| <!--[if gt IE 8]><!--> <html class="no-js"> <!--<![endif]--> |
| <head> |
| <meta charset="utf-8"> |
| <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"> |
| <meta name="viewport" content="width=device-width, initial-scale=1.0"> |
| |
| <title>Quick Start - Spark 3.5.0 Documentation</title> |
| |
| <meta name="description" content="Quick start tutorial for Spark 3.5.0"> |
| |
| |
| |
| |
| |
| <link rel="stylesheet" href="css/bootstrap.min.css"> |
| <link rel="preconnect" href="https://fonts.googleapis.com"> |
| <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin> |
| <link href="https://fonts.googleapis.com/css2?family=DM+Sans:ital,wght@0,400;0,500;0,700;1,400;1,500;1,700&Courier+Prime:wght@400;700&display=swap" rel="stylesheet"> |
| <link href="css/custom.css" rel="stylesheet"> |
| <script src="js/vendor/modernizr-2.6.1-respond-1.1.0.min.js"></script> |
| |
| <link rel="stylesheet" href="css/pygments-default.css"> |
| <link rel="stylesheet" href="css/docsearch.min.css" /> |
| <link rel="stylesheet" href="css/docsearch.css"> |
| |
| <!-- Matomo --> |
| <script type="text/javascript"> |
| var _paq = window._paq = window._paq || []; |
| /* tracker methods like "setCustomDimension" should be called before "trackPageView" */ |
| _paq.push(["disableCookies"]); |
| _paq.push(['trackPageView']); |
| _paq.push(['enableLinkTracking']); |
| (function() { |
| var u="https://analytics.apache.org/"; |
| _paq.push(['setTrackerUrl', u+'matomo.php']); |
| _paq.push(['setSiteId', '40']); |
| var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0]; |
| g.async=true; g.src=u+'matomo.js'; s.parentNode.insertBefore(g,s); |
| })(); |
| </script> |
| <!-- End Matomo Code --> |
| </head> |
| <body class="global"> |
| <!--[if lt IE 7]> |
| <p class="chromeframe">You are using an outdated browser. <a href="https://browsehappy.com/">Upgrade your browser today</a> or <a href="http://www.google.com/chromeframe/?redirect=true">install Google Chrome Frame</a> to better experience this site.</p> |
| <![endif]--> |
| |
| <!-- This code is taken from http://twitter.github.com/bootstrap/examples/hero.html --> |
| |
| <nav class="navbar navbar-expand-lg navbar-dark p-0 px-4 fixed-top" style="background: #1d6890;" id="topbar"> |
| <div class="navbar-brand"><a href="index.html"> |
| <img src="img/spark-logo-rev.svg" width="141" height="72"/></a><span class="version">3.5.0</span> |
| </div> |
| <button class="navbar-toggler" type="button" data-toggle="collapse" |
| data-target="#navbarCollapse" aria-controls="navbarCollapse" |
| aria-expanded="false" aria-label="Toggle navigation"> |
| <span class="navbar-toggler-icon"></span> |
| </button> |
| <div class="collapse navbar-collapse" id="navbarCollapse"> |
| <ul class="navbar-nav me-auto"> |
| <li class="nav-item"><a href="index.html" class="nav-link">Overview</a></li> |
| |
| <li class="nav-item dropdown"> |
| <a href="#" class="nav-link dropdown-toggle" id="navbarQuickStart" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Programming Guides</a> |
| <div class="dropdown-menu" aria-labelledby="navbarQuickStart"> |
| <a class="dropdown-item" href="quick-start.html">Quick Start</a> |
| <a class="dropdown-item" href="rdd-programming-guide.html">RDDs, Accumulators, Broadcasts Vars</a> |
| <a class="dropdown-item" href="sql-programming-guide.html">SQL, DataFrames, and Datasets</a> |
| <a class="dropdown-item" href="structured-streaming-programming-guide.html">Structured Streaming</a> |
| <a class="dropdown-item" href="streaming-programming-guide.html">Spark Streaming (DStreams)</a> |
| <a class="dropdown-item" href="ml-guide.html">MLlib (Machine Learning)</a> |
| <a class="dropdown-item" href="graphx-programming-guide.html">GraphX (Graph Processing)</a> |
| <a class="dropdown-item" href="sparkr.html">SparkR (R on Spark)</a> |
| <a class="dropdown-item" href="api/python/getting_started/index.html">PySpark (Python on Spark)</a> |
| </div> |
| </li> |
| |
| <li class="nav-item dropdown"> |
| <a href="#" class="nav-link dropdown-toggle" id="navbarAPIDocs" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">API Docs</a> |
| <div class="dropdown-menu" aria-labelledby="navbarAPIDocs"> |
| <a class="dropdown-item" href="api/scala/org/apache/spark/index.html">Scala</a> |
| <a class="dropdown-item" href="api/java/index.html">Java</a> |
| <a class="dropdown-item" href="api/python/index.html">Python</a> |
| <a class="dropdown-item" href="api/R/index.html">R</a> |
| <a class="dropdown-item" href="api/sql/index.html">SQL, Built-in Functions</a> |
| </div> |
| </li> |
| |
| <li class="nav-item dropdown"> |
| <a href="#" class="nav-link dropdown-toggle" id="navbarDeploying" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Deploying</a> |
| <div class="dropdown-menu" aria-labelledby="navbarDeploying"> |
| <a class="dropdown-item" href="cluster-overview.html">Overview</a> |
| <a class="dropdown-item" href="submitting-applications.html">Submitting Applications</a> |
| <div class="dropdown-divider"></div> |
| <a class="dropdown-item" href="spark-standalone.html">Spark Standalone</a> |
| <a class="dropdown-item" href="running-on-mesos.html">Mesos</a> |
| <a class="dropdown-item" href="running-on-yarn.html">YARN</a> |
| <a class="dropdown-item" href="running-on-kubernetes.html">Kubernetes</a> |
| </div> |
| </li> |
| |
| <li class="nav-item dropdown"> |
| <a href="#" class="nav-link dropdown-toggle" id="navbarMore" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">More</a> |
| <div class="dropdown-menu" aria-labelledby="navbarMore"> |
| <a class="dropdown-item" href="configuration.html">Configuration</a> |
| <a class="dropdown-item" href="monitoring.html">Monitoring</a> |
| <a class="dropdown-item" href="tuning.html">Tuning Guide</a> |
| <a class="dropdown-item" href="job-scheduling.html">Job Scheduling</a> |
| <a class="dropdown-item" href="security.html">Security</a> |
| <a class="dropdown-item" href="hardware-provisioning.html">Hardware Provisioning</a> |
| <a class="dropdown-item" href="migration-guide.html">Migration Guide</a> |
| <div class="dropdown-divider"></div> |
| <a class="dropdown-item" href="building-spark.html">Building Spark</a> |
| <a class="dropdown-item" href="https://spark.apache.org/contributing.html">Contributing to Spark</a> |
| <a class="dropdown-item" href="https://spark.apache.org/third-party-projects.html">Third Party Projects</a> |
| </div> |
| </li> |
| |
| <li class="nav-item"> |
| <input type="text" id="docsearch-input" placeholder="Search the docs…"> |
| </li> |
| </ul> |
| <!--<span class="navbar-text navbar-right"><span class="version-text">v3.5.0</span></span>--> |
| </div> |
| </nav> |
| |
| |
| |
| <div class="container"> |
| |
| |
| <div class="content mr-3" id="content"> |
| |
| |
| <h1 class="title">Quick Start</h1> |
| |
| |
| <ul id="markdown-toc"> |
| <li><a href="#interactive-analysis-with-the-spark-shell" id="markdown-toc-interactive-analysis-with-the-spark-shell">Interactive Analysis with the Spark Shell</a> <ul> |
| <li><a href="#basics" id="markdown-toc-basics">Basics</a></li> |
| <li><a href="#more-on-dataset-operations" id="markdown-toc-more-on-dataset-operations">More on Dataset Operations</a></li> |
| <li><a href="#caching" id="markdown-toc-caching">Caching</a></li> |
| </ul> |
| </li> |
| <li><a href="#self-contained-applications" id="markdown-toc-self-contained-applications">Self-Contained Applications</a></li> |
| <li><a href="#where-to-go-from-here" id="markdown-toc-where-to-go-from-here">Where to Go from Here</a></li> |
| </ul> |
| |
| <p>This tutorial provides a quick introduction to using Spark. We will first introduce the API through Spark’s |
| interactive shell (in Python or Scala), |
| then show how to write applications in Java, Scala, and Python.</p> |
| |
| <p>To follow along with this guide, first, download a packaged release of Spark from the |
| <a href="https://spark.apache.org/downloads.html">Spark website</a>. Since we won’t be using HDFS, |
| you can download a package for any version of Hadoop.</p> |
| |
| <p>Note that, before Spark 2.0, the main programming interface of Spark was the Resilient Distributed Dataset (RDD). After Spark 2.0, RDDs were replaced by the Dataset, which is strongly-typed like an RDD but with richer optimizations under the hood. The RDD interface is still supported, and you can find a more detailed reference in the <a href="rdd-programming-guide.html">RDD programming guide</a>. However, we highly recommend switching to Dataset, which has better performance than RDD. See the <a href="sql-programming-guide.html">SQL programming guide</a> for more information about Dataset.</p> |
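| |
| <p>As a minimal, purely illustrative sketch (it assumes an active <code class="language-plaintext highlighter-rouge">SparkSession</code> named <code class="language-plaintext highlighter-rouge">spark</code>, such as the one <code class="language-plaintext highlighter-rouge">./bin/pyspark</code> below creates for you), the two entry points look like this:</p> |
| |
| <figure class="highlight"><pre><code class="language-python" data-lang="python"># Legacy RDD API, still reachable through the SparkContext: |
| rdd = spark.sparkContext.textFile("README.md") |
| |
| # Recommended Dataset/DataFrame API, used throughout this guide: |
| df = spark.read.text("README.md")</code></pre></figure> |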
| |
| <h1 id="interactive-analysis-with-the-spark-shell">Interactive Analysis with the Spark Shell</h1> |
| |
| <h2 id="basics">Basics</h2> |
| |
| <p>Spark’s shell provides a simple way to learn the API, as well as a powerful tool to analyze data interactively. |
| It is available in either Scala (which runs on the Java VM and is thus a good way to use existing Java libraries) |
| or Python. Start it by running the following in the Spark directory:</p> |
| |
| <div class="codetabs"> |
| |
| <div data-lang="python"> |
| |
| <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>./bin/pyspark |
| </code></pre></div> </div> |
| |
| <p>Or if PySpark is installed with pip in your current environment:</p> |
| |
| <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>pyspark |
| </code></pre></div> </div> |
| |
| <p>Spark’s primary abstraction is a distributed collection of items called a Dataset. Datasets can be created from Hadoop InputFormats (such as HDFS files) or by transforming other Datasets. Due to Python’s dynamic nature, we don’t need the Dataset to be strongly-typed in Python. As a result, all Datasets in Python are Dataset[Row], and we call it <code class="language-plaintext highlighter-rouge">DataFrame</code> to be consistent with the data frame concept in Pandas and R. Let’s make a new DataFrame from the text of the README file in the Spark source directory:</p> |
| |
| <figure class="highlight"><pre><code class="language-python" data-lang="python"><span class="o">>>></span> <span class="n">textFile</span> <span class="o">=</span> <span class="n">spark</span><span class="p">.</span><span class="n">read</span><span class="p">.</span><span class="n">text</span><span class="p">(</span><span class="s">"README.md"</span><span class="p">)</span></code></pre></figure> |
| |
| <p>You can get values from a DataFrame directly by calling some actions, or transform the DataFrame to get a new one. For more details, please read the <em><a href="api/python/index.html#pyspark.sql.DataFrame">API doc</a></em>.</p> |
| |
| <figure class="highlight"><pre><code class="language-python" data-lang="python"><span class="o">>>></span> <span class="n">textFile</span><span class="p">.</span><span class="n">count</span><span class="p">()</span> <span class="c1"># Number of rows in this DataFrame |
| </span><span class="mi">126</span> |
| |
| <span class="o">>>></span> <span class="n">textFile</span><span class="p">.</span><span class="n">first</span><span class="p">()</span> <span class="c1"># First row in this DataFrame |
| </span><span class="n">Row</span><span class="p">(</span><span class="n">value</span><span class="o">=</span><span class="sa">u</span><span class="s">'# Apache Spark'</span><span class="p">)</span></code></pre></figure> |
| |
| <p>Now let’s transform this DataFrame to a new one. We call <code class="language-plaintext highlighter-rouge">filter</code> to return a new DataFrame with a subset of the lines in the file.</p> |
| |
| <figure class="highlight"><pre><code class="language-python" data-lang="python"><span class="o">>>></span> <span class="n">linesWithSpark</span> <span class="o">=</span> <span class="n">textFile</span><span class="p">.</span><span class="nb">filter</span><span class="p">(</span><span class="n">textFile</span><span class="p">.</span><span class="n">value</span><span class="p">.</span><span class="n">contains</span><span class="p">(</span><span class="s">"Spark"</span><span class="p">))</span></code></pre></figure> |
| |
| <p>We can chain together transformations and actions:</p> |
| |
| <figure class="highlight"><pre><code class="language-python" data-lang="python"><span class="o">>>></span> <span class="n">textFile</span><span class="p">.</span><span class="nb">filter</span><span class="p">(</span><span class="n">textFile</span><span class="p">.</span><span class="n">value</span><span class="p">.</span><span class="n">contains</span><span class="p">(</span><span class="s">"Spark"</span><span class="p">)).</span><span class="n">count</span><span class="p">()</span> <span class="c1"># How many lines contain "Spark"? |
| </span><span class="mi">15</span></code></pre></figure> |
| |
| </div> |
| |
| <div data-lang="scala"> |
| |
| <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>./bin/spark-shell |
| </code></pre></div> </div> |
| |
| <p>Spark’s primary abstraction is a distributed collection of items called a Dataset. Datasets can be created from Hadoop InputFormats (such as HDFS files) or by transforming other Datasets. Let’s make a new Dataset from the text of the README file in the Spark source directory:</p> |
| |
| <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">scala</span><span class="o">></span> <span class="k">val</span> <span class="nv">textFile</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">read</span><span class="o">.</span><span class="py">textFile</span><span class="o">(</span><span class="s">"README.md"</span><span class="o">)</span> |
| <span class="n">textFile</span><span class="k">:</span> <span class="kt">org.apache.spark.sql.Dataset</span><span class="o">[</span><span class="kt">String</span><span class="o">]</span> <span class="k">=</span> <span class="o">[</span><span class="kt">value:</span> <span class="kt">string</span><span class="o">]</span></code></pre></figure> |
| |
| <p>You can get values from a Dataset directly by calling some actions, or transform the Dataset to get a new one. For more details, please read the <em><a href="api/scala/org/apache/spark/sql/Dataset.html">API doc</a></em>.</p> |
| |
| <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">scala</span><span class="o">></span> <span class="nv">textFile</span><span class="o">.</span><span class="py">count</span><span class="o">()</span> <span class="c1">// Number of items in this Dataset</span> |
| <span class="n">res0</span><span class="k">:</span> <span class="kt">Long</span> <span class="o">=</span> <span class="mi">126</span> <span class="c1">// May be different from yours as README.md will change over time, similar to other outputs</span> |
| |
| <span class="n">scala</span><span class="o">></span> <span class="nv">textFile</span><span class="o">.</span><span class="py">first</span><span class="o">()</span> <span class="c1">// First item in this Dataset</span> |
| <span class="n">res1</span><span class="k">:</span> <span class="kt">String</span> <span class="o">=</span> <span class="k">#</span> <span class="nc">Apache</span> <span class="nc">Spark</span></code></pre></figure> |
| |
| <p>Now let’s transform this Dataset into a new one. We call <code class="language-plaintext highlighter-rouge">filter</code> to return a new Dataset with a subset of the items in the file.</p> |
| |
| <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">scala</span><span class="o">></span> <span class="k">val</span> <span class="nv">linesWithSpark</span> <span class="k">=</span> <span class="nv">textFile</span><span class="o">.</span><span class="py">filter</span><span class="o">(</span><span class="n">line</span> <span class="k">=></span> <span class="nv">line</span><span class="o">.</span><span class="py">contains</span><span class="o">(</span><span class="s">"Spark"</span><span class="o">))</span> |
| <span class="n">linesWithSpark</span><span class="k">:</span> <span class="kt">org.apache.spark.sql.Dataset</span><span class="o">[</span><span class="kt">String</span><span class="o">]</span> <span class="k">=</span> <span class="o">[</span><span class="kt">value:</span> <span class="kt">string</span><span class="o">]</span></code></pre></figure> |
| |
| <p>We can chain together transformations and actions:</p> |
| |
| <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">scala</span><span class="o">></span> <span class="nv">textFile</span><span class="o">.</span><span class="py">filter</span><span class="o">(</span><span class="n">line</span> <span class="k">=></span> <span class="nv">line</span><span class="o">.</span><span class="py">contains</span><span class="o">(</span><span class="s">"Spark"</span><span class="o">)).</span><span class="py">count</span><span class="o">()</span> <span class="c1">// How many lines contain "Spark"?</span> |
| <span class="n">res3</span><span class="k">:</span> <span class="kt">Long</span> <span class="o">=</span> <span class="mi">15</span></code></pre></figure> |
| |
| </div> |
| |
| </div> |
| |
| <h2 id="more-on-dataset-operations">More on Dataset Operations</h2> |
| <p>Dataset actions and transformations can be used for more complex computations. Let’s say we want to find the line with the most words:</p> |
| |
| <div class="codetabs"> |
| |
| <div data-lang="python"> |
| |
| <figure class="highlight"><pre><code class="language-python" data-lang="python"><span class="o">>>></span> <span class="kn">from</span> <span class="nn">pyspark.sql</span> <span class="kn">import</span> <span class="n">functions</span> <span class="k">as</span> <span class="n">sf</span> |
| <span class="o">>>></span> <span class="n">textFile</span><span class="p">.</span><span class="n">select</span><span class="p">(</span><span class="n">sf</span><span class="p">.</span><span class="n">size</span><span class="p">(</span><span class="n">sf</span><span class="p">.</span><span class="n">split</span><span class="p">(</span><span class="n">textFile</span><span class="p">.</span><span class="n">value</span><span class="p">,</span> <span class="s">"\s+"</span><span class="p">)).</span><span class="n">name</span><span class="p">(</span><span class="s">"numWords"</span><span class="p">)).</span><span class="n">agg</span><span class="p">(</span><span class="n">sf</span><span class="p">.</span><span class="nb">max</span><span class="p">(</span><span class="n">sf</span><span class="p">.</span><span class="n">col</span><span class="p">(</span><span class="s">"numWords"</span><span class="p">))).</span><span class="n">collect</span><span class="p">()</span> |
| <span class="p">[</span><span class="n">Row</span><span class="p">(</span><span class="nb">max</span><span class="p">(</span><span class="n">numWords</span><span class="p">)</span><span class="o">=</span><span class="mi">15</span><span class="p">)]</span></code></pre></figure> |
| |
| <p>This first maps a line to an integer value and aliases it as “numWords”, creating a new DataFrame. <code class="language-plaintext highlighter-rouge">agg</code> is called on that DataFrame to find the largest word count. The arguments to <code class="language-plaintext highlighter-rouge">select</code> and <code class="language-plaintext highlighter-rouge">agg</code> are both <em><a href="api/python/index.html#pyspark.sql.Column">Column</a></em>; we can use <code class="language-plaintext highlighter-rouge">df.colName</code> to get a column from a DataFrame. We can also import <code class="language-plaintext highlighter-rouge">pyspark.sql.functions</code>, which provides many convenient functions to build a new Column from an old one.</p> |
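| |
| <p>As a small, hedged illustration that is not part of the original example (it reuses the <code class="language-plaintext highlighter-rouge">textFile</code> DataFrame and the <code class="language-plaintext highlighter-rouge">sf</code> alias imported above; <code class="language-plaintext highlighter-rouge">numChars</code> is just a made-up column name), <code class="language-plaintext highlighter-rouge">df.colName</code> and a function-built Column can be mixed freely inside <code class="language-plaintext highlighter-rouge">select</code>:</p> |
| |
| <figure class="highlight"><pre><code class="language-python" data-lang="python">>>> # Keep the original column and add a new Column derived from it |
| >>> textFile.select(textFile.value, sf.length(textFile.value).alias("numChars")).show(3)</code></pre></figure> |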
| |
| <p>One common data flow pattern is MapReduce, as popularized by Hadoop. Spark can implement MapReduce flows easily:</p> |
| |
| <figure class="highlight"><pre><code class="language-python" data-lang="python"><span class="o">>>></span> <span class="n">wordCounts</span> <span class="o">=</span> <span class="n">textFile</span><span class="p">.</span><span class="n">select</span><span class="p">(</span><span class="n">sf</span><span class="p">.</span><span class="n">explode</span><span class="p">(</span><span class="n">sf</span><span class="p">.</span><span class="n">split</span><span class="p">(</span><span class="n">textFile</span><span class="p">.</span><span class="n">value</span><span class="p">,</span> <span class="s">"\s+"</span><span class="p">)).</span><span class="n">alias</span><span class="p">(</span><span class="s">"word"</span><span class="p">)).</span><span class="n">groupBy</span><span class="p">(</span><span class="s">"word"</span><span class="p">).</span><span class="n">count</span><span class="p">()</span></code></pre></figure> |
| |
| <p>Here, we use the <code class="language-plaintext highlighter-rouge">explode</code> function in <code class="language-plaintext highlighter-rouge">select</code> to transform a DataFrame of lines into a DataFrame of words, and then combine <code class="language-plaintext highlighter-rouge">groupBy</code> and <code class="language-plaintext highlighter-rouge">count</code> to compute the per-word counts in the file as a DataFrame of 2 columns: “word” and “count”. To collect the word counts in our shell, we can call <code class="language-plaintext highlighter-rouge">collect</code>:</p> |
| |
| <figure class="highlight"><pre><code class="language-python" data-lang="python"><span class="o">>>></span> <span class="n">wordCounts</span><span class="p">.</span><span class="n">collect</span><span class="p">()</span> |
| <span class="p">[</span><span class="n">Row</span><span class="p">(</span><span class="n">word</span><span class="o">=</span><span class="sa">u</span><span class="s">'online'</span><span class="p">,</span> <span class="n">count</span><span class="o">=</span><span class="mi">1</span><span class="p">),</span> <span class="n">Row</span><span class="p">(</span><span class="n">word</span><span class="o">=</span><span class="sa">u</span><span class="s">'graphs'</span><span class="p">,</span> <span class="n">count</span><span class="o">=</span><span class="mi">1</span><span class="p">),</span> <span class="p">...]</span></code></pre></figure> |
| |
| </div> |
| |
| <div data-lang="scala"> |
| |
| <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">scala</span><span class="o">></span> <span class="nv">textFile</span><span class="o">.</span><span class="py">map</span><span class="o">(</span><span class="n">line</span> <span class="k">=></span> <span class="nv">line</span><span class="o">.</span><span class="py">split</span><span class="o">(</span><span class="s">" "</span><span class="o">).</span><span class="py">size</span><span class="o">).</span><span class="py">reduce</span><span class="o">((</span><span class="n">a</span><span class="o">,</span> <span class="n">b</span><span class="o">)</span> <span class="k">=></span> <span class="nf">if</span> <span class="o">(</span><span class="n">a</span> <span class="o">></span> <span class="n">b</span><span class="o">)</span> <span class="n">a</span> <span class="k">else</span> <span class="n">b</span><span class="o">)</span> |
| <span class="n">res4</span><span class="k">:</span> <span class="kt">Int</span> <span class="o">=</span> <span class="mi">15</span></code></pre></figure> |
| |
| <p>This first maps a line to an integer value, creating a new Dataset. <code class="language-plaintext highlighter-rouge">reduce</code> is called on that Dataset to find the largest word count. The arguments to <code class="language-plaintext highlighter-rouge">map</code> and <code class="language-plaintext highlighter-rouge">reduce</code> are Scala function literals (closures), and can use any language feature or Scala/Java library. For example, we can easily call functions declared elsewhere. We’ll use the <code class="language-plaintext highlighter-rouge">Math.max()</code> function to make this code easier to understand:</p> |
| |
| <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">scala</span><span class="o">></span> <span class="k">import</span> <span class="nn">java.lang.Math</span> |
| <span class="k">import</span> <span class="nn">java.lang.Math</span> |
| |
| <span class="n">scala</span><span class="o">></span> <span class="nv">textFile</span><span class="o">.</span><span class="py">map</span><span class="o">(</span><span class="n">line</span> <span class="k">=></span> <span class="nv">line</span><span class="o">.</span><span class="py">split</span><span class="o">(</span><span class="s">" "</span><span class="o">).</span><span class="py">size</span><span class="o">).</span><span class="py">reduce</span><span class="o">((</span><span class="n">a</span><span class="o">,</span> <span class="n">b</span><span class="o">)</span> <span class="k">=></span> <span class="nv">Math</span><span class="o">.</span><span class="py">max</span><span class="o">(</span><span class="n">a</span><span class="o">,</span> <span class="n">b</span><span class="o">))</span> |
| <span class="n">res5</span><span class="k">:</span> <span class="kt">Int</span> <span class="o">=</span> <span class="mi">15</span></code></pre></figure> |
| |
| <p>One common data flow pattern is MapReduce, as popularized by Hadoop. Spark can implement MapReduce flows easily:</p> |
| |
| <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">scala</span><span class="o">></span> <span class="k">val</span> <span class="nv">wordCounts</span> <span class="k">=</span> <span class="nv">textFile</span><span class="o">.</span><span class="py">flatMap</span><span class="o">(</span><span class="n">line</span> <span class="k">=></span> <span class="nv">line</span><span class="o">.</span><span class="py">split</span><span class="o">(</span><span class="s">" "</span><span class="o">)).</span><span class="py">groupByKey</span><span class="o">(</span><span class="n">identity</span><span class="o">).</span><span class="py">count</span><span class="o">()</span> |
| <span class="n">wordCounts</span><span class="k">:</span> <span class="kt">org.apache.spark.sql.Dataset</span><span class="o">[(</span><span class="kt">String</span>, <span class="kt">Long</span><span class="o">)]</span> <span class="k">=</span> <span class="o">[</span><span class="kt">value:</span> <span class="kt">string</span>, <span class="kt">count</span><span class="o">(</span><span class="err">1</span><span class="o">)</span><span class="kt">:</span> <span class="kt">bigint</span><span class="o">]</span></code></pre></figure> |
| |
| <p>Here, we call <code class="language-plaintext highlighter-rouge">flatMap</code> to transform a Dataset of lines to a Dataset of words, and then combine <code class="language-plaintext highlighter-rouge">groupByKey</code> and <code class="language-plaintext highlighter-rouge">count</code> to compute the per-word counts in the file as a Dataset of (String, Long) pairs. To collect the word counts in our shell, we can call <code class="language-plaintext highlighter-rouge">collect</code>:</p> |
| |
| <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">scala</span><span class="o">></span> <span class="nv">wordCounts</span><span class="o">.</span><span class="py">collect</span><span class="o">()</span> |
| <span class="n">res6</span><span class="k">:</span> <span class="kt">Array</span><span class="o">[(</span><span class="kt">String</span>, <span class="kt">Int</span><span class="o">)]</span> <span class="k">=</span> <span class="nc">Array</span><span class="o">((</span><span class="n">means</span><span class="o">,</span><span class="mi">1</span><span class="o">),</span> <span class="o">(</span><span class="n">under</span><span class="o">,</span><span class="mi">2</span><span class="o">),</span> <span class="o">(</span><span class="k">this</span><span class="o">,</span><span class="mi">3</span><span class="o">),</span> <span class="o">(</span><span class="nc">Because</span><span class="o">,</span><span class="mi">1</span><span class="o">),</span> <span class="o">(</span><span class="nc">Python</span><span class="o">,</span><span class="mi">2</span><span class="o">),</span> <span class="o">(</span><span class="n">agree</span><span class="o">,</span><span class="mi">1</span><span class="o">),</span> <span class="o">(</span><span class="n">cluster</span><span class="o">.,</span><span class="mi">1</span><span class="o">),</span> <span class="o">...)</span></code></pre></figure> |
| |
| </div> |
| |
| </div> |
| |
| <h2 id="caching">Caching</h2> |
| <p>Spark also supports pulling data sets into a cluster-wide in-memory cache. This is very useful when data is accessed repeatedly, such as when querying a small “hot” dataset or when running an iterative algorithm like PageRank. As a simple example, let’s mark our <code class="language-plaintext highlighter-rouge">linesWithSpark</code> dataset to be cached:</p> |
| |
| <div class="codetabs"> |
| |
| <div data-lang="python"> |
| |
| <figure class="highlight"><pre><code class="language-python" data-lang="python"><span class="o">>>></span> <span class="n">linesWithSpark</span><span class="p">.</span><span class="n">cache</span><span class="p">()</span> |
| |
| <span class="o">>>></span> <span class="n">linesWithSpark</span><span class="p">.</span><span class="n">count</span><span class="p">()</span> |
| <span class="mi">15</span> |
| |
| <span class="o">>>></span> <span class="n">linesWithSpark</span><span class="p">.</span><span class="n">count</span><span class="p">()</span> |
| <span class="mi">15</span></code></pre></figure> |
| |
| <p>It may seem silly to use Spark to explore and cache a 100-line text file. The interesting part is |
| that these same functions can be used on very large data sets, even when they are striped across |
| tens or hundreds of nodes. You can also do this interactively by connecting <code class="language-plaintext highlighter-rouge">bin/pyspark</code> to |
| a cluster, as described in the <a href="rdd-programming-guide.html#using-the-shell">RDD programming guide</a>.</p> |
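| |
| <p>As a hedged aside that goes beyond this guide (the <code class="language-plaintext highlighter-rouge">spark://host:7077</code> master URL below is only a placeholder), a self-contained program can target a cluster by setting the master when building its <code class="language-plaintext highlighter-rouge">SparkSession</code>, which has the same effect as passing <code class="language-plaintext highlighter-rouge">--master</code> to the shell:</p> |
| |
| <figure class="highlight"><pre><code class="language-python" data-lang="python">from pyspark.sql import SparkSession |
| |
| # Placeholder cluster URL; use "local[*]" to run on all local cores instead. |
| spark = (SparkSession.builder |
|          .master("spark://host:7077") |
|          .appName("CachingExample") |
|          .getOrCreate()) |
| |
| # Same caching pattern as above, expressed with a SQL filter string. |
| linesWithSpark = spark.read.text("README.md").filter("value LIKE '%Spark%'").cache() |
| print(linesWithSpark.count())</code></pre></figure> |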
| |
| </div> |
| |
| <div data-lang="scala"> |
| |
| <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">scala</span><span class="o">></span> <span class="nv">linesWithSpark</span><span class="o">.</span><span class="py">cache</span><span class="o">()</span> |
| <span class="n">res7</span><span class="k">:</span> <span class="kt">linesWithSpark.</span><span class="k">type</span> <span class="o">=</span> <span class="o">[</span><span class="kt">value:</span> <span class="kt">string</span><span class="o">]</span> |
| |
| <span class="n">scala</span><span class="o">></span> <span class="nv">linesWithSpark</span><span class="o">.</span><span class="py">count</span><span class="o">()</span> |
| <span class="n">res8</span><span class="k">:</span> <span class="kt">Long</span> <span class="o">=</span> <span class="mi">15</span> |
| |
| <span class="n">scala</span><span class="o">></span> <span class="nv">linesWithSpark</span><span class="o">.</span><span class="py">count</span><span class="o">()</span> |
| <span class="n">res9</span><span class="k">:</span> <span class="kt">Long</span> <span class="o">=</span> <span class="mi">15</span></code></pre></figure> |
| |
| <p>It may seem silly to use Spark to explore and cache a 100-line text file. The interesting part is |
| that these same functions can be used on very large data sets, even when they are striped across |
| tens or hundreds of nodes. You can also do this interactively by connecting <code class="language-plaintext highlighter-rouge">bin/spark-shell</code> to |
| a cluster, as described in the <a href="rdd-programming-guide.html#using-the-shell">RDD programming guide</a>.</p> |
| |
| </div> |
| |
| </div> |
| |
| <h1 id="self-contained-applications">Self-Contained Applications</h1> |
| <p>Suppose we wish to write a self-contained application using the Spark API. We will walk through a |
| simple application in Scala (with sbt), Java (with Maven), and Python (pip).</p> |
| |
| <div class="codetabs"> |
| |
| <div data-lang="python"> |
| |
| <p>Now we will show how to write an application using the Python API (PySpark).</p> |
| |
| <p>If you are building a packaged PySpark application or library, you can add it to your setup.py file as:</p> |
| |
| <figure class="highlight"><pre><code class="language-python" data-lang="python"> <span class="n">install_requires</span><span class="o">=</span><span class="p">[</span> |
| <span class="s">'pyspark==3.5.0'</span> |
| <span class="p">]</span></code></pre></figure> |
| |
| <p>As an example, we’ll create a simple Spark application, <code class="language-plaintext highlighter-rouge">SimpleApp.py</code>:</p> |
| |
| <figure class="highlight"><pre><code class="language-python" data-lang="python"><span class="s">"""SimpleApp.py"""</span> |
| <span class="kn">from</span> <span class="nn">pyspark.sql</span> <span class="kn">import</span> <span class="n">SparkSession</span> |
| |
| <span class="n">logFile</span> <span class="o">=</span> <span class="s">"YOUR_SPARK_HOME/README.md"</span> <span class="c1"># Should be some file on your system |
| </span><span class="n">spark</span> <span class="o">=</span> <span class="n">SparkSession</span><span class="p">.</span><span class="n">builder</span><span class="p">.</span><span class="n">appName</span><span class="p">(</span><span class="s">"SimpleApp"</span><span class="p">).</span><span class="n">getOrCreate</span><span class="p">()</span> |
| <span class="n">logData</span> <span class="o">=</span> <span class="n">spark</span><span class="p">.</span><span class="n">read</span><span class="p">.</span><span class="n">text</span><span class="p">(</span><span class="n">logFile</span><span class="p">).</span><span class="n">cache</span><span class="p">()</span> |
| |
| <span class="n">numAs</span> <span class="o">=</span> <span class="n">logData</span><span class="p">.</span><span class="nb">filter</span><span class="p">(</span><span class="n">logData</span><span class="p">.</span><span class="n">value</span><span class="p">.</span><span class="n">contains</span><span class="p">(</span><span class="s">'a'</span><span class="p">)).</span><span class="n">count</span><span class="p">()</span> |
| <span class="n">numBs</span> <span class="o">=</span> <span class="n">logData</span><span class="p">.</span><span class="nb">filter</span><span class="p">(</span><span class="n">logData</span><span class="p">.</span><span class="n">value</span><span class="p">.</span><span class="n">contains</span><span class="p">(</span><span class="s">'b'</span><span class="p">)).</span><span class="n">count</span><span class="p">()</span> |
| |
| <span class="k">print</span><span class="p">(</span><span class="s">"Lines with a: %i, lines with b: %i"</span> <span class="o">%</span> <span class="p">(</span><span class="n">numAs</span><span class="p">,</span> <span class="n">numBs</span><span class="p">))</span> |
| |
| <span class="n">spark</span><span class="p">.</span><span class="n">stop</span><span class="p">()</span></code></pre></figure> |
| |
| <p>This program just counts the number of lines containing ‘a’ and the number containing ‘b’ in a |
| text file. |
| Note that you’ll need to replace YOUR_SPARK_HOME with the location where Spark is installed. |
| As with the Scala and Java examples, we use a SparkSession to create Datasets. |
| For applications that use custom classes or third-party libraries, we can also add code |
| dependencies to <code class="language-plaintext highlighter-rouge">spark-submit</code> through its <code class="language-plaintext highlighter-rouge">--py-files</code> argument by packaging them into a |
| .zip file (see <code class="language-plaintext highlighter-rouge">spark-submit --help</code> for details). |
| <code class="language-plaintext highlighter-rouge">SimpleApp</code> is simple enough that we do not need to specify any code dependencies.</p> |
| |
| <p>We can run this application using the <code class="language-plaintext highlighter-rouge">bin/spark-submit</code> script:</p> |
| |
| <figure class="highlight"><pre><code class="language-bash" data-lang="bash"><span class="c"># Use spark-submit to run your application</span> |
| <span class="nv">$ </span>YOUR_SPARK_HOME/bin/spark-submit <span class="se">\</span> |
| <span class="nt">--master</span> <span class="nb">local</span><span class="o">[</span>4] <span class="se">\</span> |
| SimpleApp.py |
| ... |
| Lines with a: 46, Lines with b: 23</code></pre></figure> |
| |
| <p>If you have PySpark pip installed into your environment (e.g., <code class="language-plaintext highlighter-rouge">pip install pyspark</code>), you can run your application with the regular Python interpreter or use the provided <code class="language-plaintext highlighter-rouge">spark-submit</code> script as you prefer.</p> |
| |
| <figure class="highlight"><pre><code class="language-bash" data-lang="bash"><span class="c"># Use the Python interpreter to run your application</span> |
| <span class="nv">$ </span>python SimpleApp.py |
| ... |
| Lines with a: 46, Lines with b: 23</code></pre></figure> |
| |
| </div> |
| |
| <div data-lang="scala"> |
| |
| <p>We’ll create a very simple Spark application in Scala–so simple, in fact, that it’s |
| named <code class="language-plaintext highlighter-rouge">SimpleApp.scala</code>:</p> |
| |
| <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="cm">/* SimpleApp.scala */</span> |
| <span class="k">import</span> <span class="nn">org.apache.spark.sql.SparkSession</span> |
| |
| <span class="k">object</span> <span class="nc">SimpleApp</span> <span class="o">{</span> |
| <span class="k">def</span> <span class="nf">main</span><span class="o">(</span><span class="n">args</span><span class="k">:</span> <span class="kt">Array</span><span class="o">[</span><span class="kt">String</span><span class="o">])</span><span class="k">:</span> <span class="kt">Unit</span> <span class="o">=</span> <span class="o">{</span> |
| <span class="k">val</span> <span class="nv">logFile</span> <span class="k">=</span> <span class="s">"YOUR_SPARK_HOME/README.md"</span> <span class="c1">// Should be some file on your system</span> |
| <span class="k">val</span> <span class="nv">spark</span> <span class="k">=</span> <span class="nv">SparkSession</span><span class="o">.</span><span class="py">builder</span><span class="o">.</span><span class="py">appName</span><span class="o">(</span><span class="s">"Simple Application"</span><span class="o">).</span><span class="py">getOrCreate</span><span class="o">()</span> |
| <span class="k">val</span> <span class="nv">logData</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">read</span><span class="o">.</span><span class="py">textFile</span><span class="o">(</span><span class="n">logFile</span><span class="o">).</span><span class="py">cache</span><span class="o">()</span> |
| <span class="k">val</span> <span class="nv">numAs</span> <span class="k">=</span> <span class="nv">logData</span><span class="o">.</span><span class="py">filter</span><span class="o">(</span><span class="n">line</span> <span class="k">=></span> <span class="nv">line</span><span class="o">.</span><span class="py">contains</span><span class="o">(</span><span class="s">"a"</span><span class="o">)).</span><span class="py">count</span><span class="o">()</span> |
| <span class="k">val</span> <span class="nv">numBs</span> <span class="k">=</span> <span class="nv">logData</span><span class="o">.</span><span class="py">filter</span><span class="o">(</span><span class="n">line</span> <span class="k">=></span> <span class="nv">line</span><span class="o">.</span><span class="py">contains</span><span class="o">(</span><span class="s">"b"</span><span class="o">)).</span><span class="py">count</span><span class="o">()</span> |
| <span class="nf">println</span><span class="o">(</span><span class="n">s</span><span class="s">"Lines with a: $numAs, Lines with b: $numBs"</span><span class="o">)</span> |
| <span class="nv">spark</span><span class="o">.</span><span class="py">stop</span><span class="o">()</span> |
| <span class="o">}</span> |
| <span class="o">}</span></code></pre></figure> |
| |
| <p>Note that applications should define a <code class="language-plaintext highlighter-rouge">main()</code> method instead of extending <code class="language-plaintext highlighter-rouge">scala.App</code>. |
| Subclasses of <code class="language-plaintext highlighter-rouge">scala.App</code> may not work correctly.</p> |
| |
| <p>This program just counts the number of lines containing ‘a’ and the number containing ‘b’ in the |
| Spark README. Note that you’ll need to replace YOUR_SPARK_HOME with the location where Spark is |
| installed. Unlike the earlier examples with the Spark shell, which initializes its own SparkSession, |
| we initialize a SparkSession as part of the program.</p> |
| |
| <p>We call <code class="language-plaintext highlighter-rouge">SparkSession.builder</code> to construct a <code class="language-plaintext highlighter-rouge">SparkSession</code>, then set the application name, and finally call <code class="language-plaintext highlighter-rouge">getOrCreate</code> to get the <code class="language-plaintext highlighter-rouge">SparkSession</code> instance.</p> |
| |
| <p>Our application depends on the Spark API, so we’ll also include an sbt configuration file, |
| <code class="language-plaintext highlighter-rouge">build.sbt</code>, which declares Spark as a dependency:</p> |
| |
| <figure class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="n">name</span> <span class="o">:=</span> <span class="s">"Simple Project"</span> |
| |
| <span class="n">version</span> <span class="o">:=</span> <span class="s">"1.0"</span> |
| |
| <span class="n">scalaVersion</span> <span class="o">:=</span> <span class="s">"2.12.18"</span> |
| |
| <span class="n">libraryDependencies</span> <span class="o">+=</span> <span class="s">"org.apache.spark"</span> <span class="o">%%</span> <span class="s">"spark-sql"</span> <span class="o">%</span> <span class="s">"3.5.0"</span></code></pre></figure> |
| |
| <p>For sbt to work correctly, we’ll need to lay out <code class="language-plaintext highlighter-rouge">SimpleApp.scala</code> and <code class="language-plaintext highlighter-rouge">build.sbt</code> |
| according to the typical directory structure. Once that is in place, we can create a JAR package |
| containing the application’s code, then use the <code class="language-plaintext highlighter-rouge">spark-submit</code> script to run our program.</p> |
| |
| <figure class="highlight"><pre><code class="language-bash" data-lang="bash"><span class="c"># Your directory layout should look like this</span> |
| <span class="nv">$ </span>find <span class="nb">.</span> |
| <span class="nb">.</span> |
| ./build.sbt |
| ./src |
| ./src/main |
| ./src/main/scala |
| ./src/main/scala/SimpleApp.scala |
| |
| <span class="c"># Package a jar containing your application</span> |
| <span class="nv">$ </span>sbt package |
| ... |
| <span class="o">[</span>info] Packaging <span class="o">{</span>..<span class="o">}</span>/<span class="o">{</span>..<span class="o">}</span>/target/scala-2.12/simple-project_2.12-1.0.jar |
| |
| <span class="c"># Use spark-submit to run your application</span> |
| <span class="nv">$ </span>YOUR_SPARK_HOME/bin/spark-submit <span class="se">\</span> |
| <span class="nt">--class</span> <span class="s2">"SimpleApp"</span> <span class="se">\</span> |
| <span class="nt">--master</span> <span class="nb">local</span><span class="o">[</span>4] <span class="se">\</span> |
| target/scala-2.12/simple-project_2.12-1.0.jar |
| ... |
| Lines with a: 46, Lines with b: 23</code></pre></figure> |
| |
| </div> |
| <div data-lang="java"> |
| <p>This example will use Maven to compile an application JAR, but any similar build system will work.</p> |
| |
| <p>We’ll create a very simple Spark application, <code class="language-plaintext highlighter-rouge">SimpleApp.java</code>:</p> |
| |
| <figure class="highlight"><pre><code class="language-java" data-lang="java"><span class="cm">/* SimpleApp.java */</span> |
| <span class="kn">import</span> <span class="nn">org.apache.spark.sql.SparkSession</span><span class="o">;</span> |
| <span class="kn">import</span> <span class="nn">org.apache.spark.sql.Dataset</span><span class="o">;</span> |
| |
| <span class="kd">public</span> <span class="kd">class</span> <span class="nc">SimpleApp</span> <span class="o">{</span> |
| <span class="kd">public</span> <span class="kd">static</span> <span class="kt">void</span> <span class="nf">main</span><span class="o">(</span><span class="nc">String</span><span class="o">[]</span> <span class="n">args</span><span class="o">)</span> <span class="o">{</span> |
| <span class="nc">String</span> <span class="n">logFile</span> <span class="o">=</span> <span class="s">"YOUR_SPARK_HOME/README.md"</span><span class="o">;</span> <span class="c1">// Should be some file on your system</span> |
| <span class="nc">SparkSession</span> <span class="n">spark</span> <span class="o">=</span> <span class="nc">SparkSession</span><span class="o">.</span><span class="na">builder</span><span class="o">().</span><span class="na">appName</span><span class="o">(</span><span class="s">"Simple Application"</span><span class="o">).</span><span class="na">getOrCreate</span><span class="o">();</span> |
| <span class="nc">Dataset</span><span class="o"><</span><span class="nc">String</span><span class="o">></span> <span class="n">logData</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="na">read</span><span class="o">().</span><span class="na">textFile</span><span class="o">(</span><span class="n">logFile</span><span class="o">).</span><span class="na">cache</span><span class="o">();</span> |
| |
| <span class="kt">long</span> <span class="n">numAs</span> <span class="o">=</span> <span class="n">logData</span><span class="o">.</span><span class="na">filter</span><span class="o">(</span><span class="n">s</span> <span class="o">-></span> <span class="n">s</span><span class="o">.</span><span class="na">contains</span><span class="o">(</span><span class="s">"a"</span><span class="o">)).</span><span class="na">count</span><span class="o">();</span> |
| <span class="kt">long</span> <span class="n">numBs</span> <span class="o">=</span> <span class="n">logData</span><span class="o">.</span><span class="na">filter</span><span class="o">(</span><span class="n">s</span> <span class="o">-></span> <span class="n">s</span><span class="o">.</span><span class="na">contains</span><span class="o">(</span><span class="s">"b"</span><span class="o">)).</span><span class="na">count</span><span class="o">();</span> |
| |
| <span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="s">"Lines with a: "</span> <span class="o">+</span> <span class="n">numAs</span> <span class="o">+</span> <span class="s">", lines with b: "</span> <span class="o">+</span> <span class="n">numBs</span><span class="o">);</span> |
| |
| <span class="n">spark</span><span class="o">.</span><span class="na">stop</span><span class="o">();</span> |
| <span class="o">}</span> |
| <span class="o">}</span></code></pre></figure> |
| |
| <p>This program just counts the number of lines containing ‘a’ and the number containing ‘b’ in the |
| Spark README. Note that you’ll need to replace YOUR_SPARK_HOME with the location where Spark is |
| installed. Unlike the earlier examples with the Spark shell, which initializes its own SparkSession, |
| we initialize a SparkSession as part of the program.</p> |
| |
| <p>To build the program, we also write a Maven <code class="language-plaintext highlighter-rouge">pom.xml</code> file that lists Spark as a dependency. |
| Note that Spark artifacts are tagged with a Scala version.</p> |
| |
| <figure class="highlight"><pre><code class="language-xml" data-lang="xml"><span class="nt"><project></span> |
| <span class="nt"><groupId></span>edu.berkeley<span class="nt"></groupId></span> |
| <span class="nt"><artifactId></span>simple-project<span class="nt"></artifactId></span> |
| <span class="nt"><modelVersion></span>4.0.0<span class="nt"></modelVersion></span> |
| <span class="nt"><name></span>Simple Project<span class="nt"></name></span> |
| <span class="nt"><packaging></span>jar<span class="nt"></packaging></span> |
| <span class="nt"><version></span>1.0<span class="nt"></version></span> |
| <span class="nt"><dependencies></span> |
| <span class="nt"><dependency></span> <span class="c"><!-- Spark dependency --></span> |
| <span class="nt"><groupId></span>org.apache.spark<span class="nt"></groupId></span> |
| <span class="nt"><artifactId></span>spark-sql_2.12<span class="nt"></artifactId></span> |
| <span class="nt"><version></span>3.5.0<span class="nt"></version></span> |
| <span class="nt"><scope></span>provided<span class="nt"></scope></span> |
| <span class="nt"></dependency></span> |
| <span class="nt"></dependencies></span> |
| <span class="nt"></project></span></code></pre></figure> |
| |
| <p>We lay out these files according to the canonical Maven directory structure:</p> |
| |
| <figure class="highlight"><pre><code class="language-bash" data-lang="bash"><span class="nv">$ </span>find <span class="nb">.</span> |
| ./pom.xml |
| ./src |
| ./src/main |
| ./src/main/java |
| ./src/main/java/SimpleApp.java</code></pre></figure> |
| |
| <p>Now, we can package the application using Maven and execute it with <code class="language-plaintext highlighter-rouge">./bin/spark-submit</code>.</p> |
| |
| <figure class="highlight"><pre><code class="language-bash" data-lang="bash"><span class="c"># Package a JAR containing your application</span> |
| <span class="nv">$ </span>mvn package |
| ... |
| <span class="o">[</span>INFO] Building jar: <span class="o">{</span>..<span class="o">}</span>/<span class="o">{</span>..<span class="o">}</span>/target/simple-project-1.0.jar |
| |
| <span class="c"># Use spark-submit to run your application</span> |
| <span class="nv">$ </span>YOUR_SPARK_HOME/bin/spark-submit <span class="se">\</span> |
| <span class="nt">--class</span> <span class="s2">"SimpleApp"</span> <span class="se">\</span> |
| <span class="nt">--master</span> <span class="nb">local</span><span class="o">[</span>4] <span class="se">\</span> |
| target/simple-project-1.0.jar |
| ... |
| Lines with a: 46, Lines with b: 23</code></pre></figure> |
| |
| </div> |
| |
| </div> |
| |
| <p>Other dependency management tools such as Conda and pip can also be used for custom classes or third-party libraries. See also <a href="api/python/user_guide/python_packaging.html">Python Package Management</a>.</p> |
| |
| <h1 id="where-to-go-from-here">Where to Go from Here</h1> |
| <p>Congratulations on running your first Spark application!</p> |
| |
| <ul> |
| <li>For an in-depth overview of the API, start with the <a href="rdd-programming-guide.html">RDD programming guide</a> and the <a href="sql-programming-guide.html">SQL programming guide</a>, or see the “Programming Guides” menu for other components.</li> |
| <li>For running applications on a cluster, head to the <a href="cluster-overview.html">deployment overview</a>.</li> |
| <li>Finally, Spark includes several samples in the <code class="language-plaintext highlighter-rouge">examples</code> directory |
| (<a href="https://github.com/apache/spark/tree/master/examples/src/main/scala/org/apache/spark/examples">Scala</a>, |
| <a href="https://github.com/apache/spark/tree/master/examples/src/main/java/org/apache/spark/examples">Java</a>, |
| <a href="https://github.com/apache/spark/tree/master/examples/src/main/python">Python</a>, |
| <a href="https://github.com/apache/spark/tree/master/examples/src/main/r">R</a>). |
| You can run them as follows:</li> |
| </ul> |
| |
| <figure class="highlight"><pre><code class="language-bash" data-lang="bash"><span class="c"># For Scala and Java, use run-example:</span> |
| ./bin/run-example SparkPi |
| |
| <span class="c"># For Python examples, use spark-submit directly:</span> |
| ./bin/spark-submit examples/src/main/python/pi.py |
| |
| <span class="c"># For R examples, use spark-submit directly:</span> |
| ./bin/spark-submit examples/src/main/r/dataframe.R</code></pre></figure> |
| |
| |
| </div> |
| |
| <!-- /container --> |
| </div> |
| |
| <script src="js/vendor/jquery-3.5.1.min.js"></script> |
| <script src="js/vendor/bootstrap.bundle.min.js"></script> |
| |
| <script src="js/vendor/anchor.min.js"></script> |
| <script src="js/main.js"></script> |
| |
| <script type="text/javascript" src="js/vendor/docsearch.min.js"></script> |
| <script type="text/javascript"> |
| // DocSearch is entirely free and automated. DocSearch is built in two parts: |
| // 1. a crawler which we run on our own infrastructure every 24 hours. It follows every link |
| //    in your website and extracts content from every page it traverses. It then pushes this |
| //    content to an Algolia index. |
| // 2. a JavaScript snippet to be inserted in your website that will bind this Algolia index |
| //    to your search input and display its results in a dropdown UI. If you want to find more |
| //    details on how DocSearch works, check the DocSearch documentation. |
| docsearch({ |
| apiKey: 'd62f962a82bc9abb53471cb7b89da35e', |
| appId: 'RAI69RXRSK', |
| indexName: 'apache_spark', |
| inputSelector: '#docsearch-input', |
| enhancedSearchInput: true, |
| algoliaOptions: { |
| 'facetFilters': ["version:3.5.0"] |
| }, |
| debug: false // Set debug to true if you want to inspect the dropdown |
| }); |
| |
| </script> |
| |
| <!-- MathJax Section --> |
| <script type="text/x-mathjax-config"> |
| MathJax.Hub.Config({ |
| TeX: { equationNumbers: { autoNumber: "AMS" } } |
| }); |
| </script> |
| <script> |
| // Note that we load MathJax this way to work with local file (file://), HTTP and HTTPS. |
| // We could use "//cdn.mathjax...", but that won't support "file://". |
| (function(d, script) { |
| script = d.createElement('script'); |
| script.type = 'text/javascript'; |
| script.async = true; |
| script.onload = function(){ |
| MathJax.Hub.Config({ |
| tex2jax: { |
| inlineMath: [ ["$", "$"], ["\\\\(","\\\\)"] ], |
| displayMath: [ ["$$","$$"], ["\\[", "\\]"] ], |
| processEscapes: true, |
| skipTags: ['script', 'noscript', 'style', 'textarea', 'pre'] |
| } |
| }); |
| }; |
| script.src = ('https:' == document.location.protocol ? 'https://' : 'http://') + |
| 'cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js' + |
| '?config=TeX-AMS-MML_HTMLorMML'; |
| d.getElementsByTagName('head')[0].appendChild(script); |
| }(document)); |
| </script> |
| </body> |
| </html> |