<!DOCTYPE html>
<!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7"> <![endif]-->
<!--[if IE 7]> <html class="no-js lt-ie9 lt-ie8"> <![endif]-->
<!--[if IE 8]> <html class="no-js lt-ie9"> <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js"> <!--<![endif]-->
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Clustering - RDD-based API - Spark 3.5.0 Documentation</title>
<link rel="stylesheet" href="css/bootstrap.min.css">
<link rel="preconnect" href="https://fonts.googleapis.com">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link href="https://fonts.googleapis.com/css2?family=DM+Sans:ital,wght@0,400;0,500;0,700;1,400;1,500;1,700&Courier+Prime:wght@400;700&display=swap" rel="stylesheet">
<link href="css/custom.css" rel="stylesheet">
<script src="js/vendor/modernizr-2.6.1-respond-1.1.0.min.js"></script>
<link rel="stylesheet" href="css/pygments-default.css">
<link rel="stylesheet" href="css/docsearch.min.css" />
<link rel="stylesheet" href="css/docsearch.css">
<!-- Matomo -->
<script type="text/javascript">
var _paq = window._paq = window._paq || [];
/* tracker methods like "setCustomDimension" should be called before "trackPageView" */
_paq.push(["disableCookies"]);
_paq.push(['trackPageView']);
_paq.push(['enableLinkTracking']);
(function() {
var u="https://analytics.apache.org/";
_paq.push(['setTrackerUrl', u+'matomo.php']);
_paq.push(['setSiteId', '40']);
var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
g.async=true; g.src=u+'matomo.js'; s.parentNode.insertBefore(g,s);
})();
</script>
<!-- End Matomo Code -->
</head>
<body class="global">
<!--[if lt IE 7]>
<p class="chromeframe">You are using an outdated browser. <a href="https://browsehappy.com/">Upgrade your browser today</a> or <a href="http://www.google.com/chromeframe/?redirect=true">install Google Chrome Frame</a> to better experience this site.</p>
<![endif]-->
<!-- This code is taken from http://twitter.github.com/bootstrap/examples/hero.html -->
<nav class="navbar navbar-expand-lg navbar-dark p-0 px-4 fixed-top" style="background: #1d6890;" id="topbar">
<div class="navbar-brand"><a href="index.html">
<img src="img/spark-logo-rev.svg" width="141" height="72"/></a><span class="version">3.5.0</span>
</div>
<button class="navbar-toggler" type="button" data-toggle="collapse"
data-target="#navbarCollapse" aria-controls="navbarCollapse"
aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarCollapse">
<ul class="navbar-nav me-auto">
<li class="nav-item"><a href="index.html" class="nav-link">Overview</a></li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarQuickStart" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Programming Guides</a>
<div class="dropdown-menu" aria-labelledby="navbarQuickStart">
<a class="dropdown-item" href="quick-start.html">Quick Start</a>
<a class="dropdown-item" href="rdd-programming-guide.html">RDDs, Accumulators, Broadcasts Vars</a>
<a class="dropdown-item" href="sql-programming-guide.html">SQL, DataFrames, and Datasets</a>
<a class="dropdown-item" href="structured-streaming-programming-guide.html">Structured Streaming</a>
<a class="dropdown-item" href="streaming-programming-guide.html">Spark Streaming (DStreams)</a>
<a class="dropdown-item" href="ml-guide.html">MLlib (Machine Learning)</a>
<a class="dropdown-item" href="graphx-programming-guide.html">GraphX (Graph Processing)</a>
<a class="dropdown-item" href="sparkr.html">SparkR (R on Spark)</a>
<a class="dropdown-item" href="api/python/getting_started/index.html">PySpark (Python on Spark)</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarAPIDocs" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">API Docs</a>
<div class="dropdown-menu" aria-labelledby="navbarAPIDocs">
<a class="dropdown-item" href="api/scala/org/apache/spark/index.html">Scala</a>
<a class="dropdown-item" href="api/java/index.html">Java</a>
<a class="dropdown-item" href="api/python/index.html">Python</a>
<a class="dropdown-item" href="api/R/index.html">R</a>
<a class="dropdown-item" href="api/sql/index.html">SQL, Built-in Functions</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarDeploying" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Deploying</a>
<div class="dropdown-menu" aria-labelledby="navbarDeploying">
<a class="dropdown-item" href="cluster-overview.html">Overview</a>
<a class="dropdown-item" href="submitting-applications.html">Submitting Applications</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="spark-standalone.html">Spark Standalone</a>
<a class="dropdown-item" href="running-on-mesos.html">Mesos</a>
<a class="dropdown-item" href="running-on-yarn.html">YARN</a>
<a class="dropdown-item" href="running-on-kubernetes.html">Kubernetes</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarMore" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">More</a>
<div class="dropdown-menu" aria-labelledby="navbarMore">
<a class="dropdown-item" href="configuration.html">Configuration</a>
<a class="dropdown-item" href="monitoring.html">Monitoring</a>
<a class="dropdown-item" href="tuning.html">Tuning Guide</a>
<a class="dropdown-item" href="job-scheduling.html">Job Scheduling</a>
<a class="dropdown-item" href="security.html">Security</a>
<a class="dropdown-item" href="hardware-provisioning.html">Hardware Provisioning</a>
<a class="dropdown-item" href="migration-guide.html">Migration Guide</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="building-spark.html">Building Spark</a>
<a class="dropdown-item" href="https://spark.apache.org/contributing.html">Contributing to Spark</a>
<a class="dropdown-item" href="https://spark.apache.org/third-party-projects.html">Third Party Projects</a>
</div>
</li>
<li class="nav-item">
<input type="text" id="docsearch-input" placeholder="Search the docs…">
</li>
</ul>
<!--<span class="navbar-text navbar-right"><span class="version-text">v3.5.0</span></span>-->
</div>
</nav>
<div class="container">
<div class="left-menu-wrapper">
<div class="left-menu">
<h3><a href="ml-guide.html">MLlib: Main Guide</a></h3>
<ul>
<li>
<a href="ml-statistics.html">
Basic statistics
</a>
</li>
<li>
<a href="ml-datasource.html">
Data sources
</a>
</li>
<li>
<a href="ml-pipeline.html">
Pipelines
</a>
</li>
<li>
<a href="ml-features.html">
Extracting, transforming and selecting features
</a>
</li>
<li>
<a href="ml-classification-regression.html">
Classification and Regression
</a>
</li>
<li>
<a href="ml-clustering.html">
Clustering
</a>
</li>
<li>
<a href="ml-collaborative-filtering.html">
Collaborative filtering
</a>
</li>
<li>
<a href="ml-frequent-pattern-mining.html">
Frequent Pattern Mining
</a>
</li>
<li>
<a href="ml-tuning.html">
Model selection and tuning
</a>
</li>
<li>
<a href="ml-advanced.html">
Advanced topics
</a>
</li>
</ul>
<h3><a href="mllib-guide.html">MLlib: RDD-based API Guide</a></h3>
<ul>
<li>
<a href="mllib-data-types.html">
Data types
</a>
</li>
<li>
<a href="mllib-statistics.html">
Basic statistics
</a>
</li>
<li>
<a href="mllib-classification-regression.html">
Classification and regression
</a>
</li>
<li>
<a href="mllib-collaborative-filtering.html">
Collaborative filtering
</a>
</li>
<li>
<a href="mllib-clustering.html">
Clustering
</a>
<ul>
<li>
<a href="mllib-clustering.html#k-means">
k-means
</a>
</li>
<li>
<a href="mllib-clustering.html#gaussian-mixture">
Gaussian mixture
</a>
</li>
<li>
<a href="mllib-clustering.html#power-iteration-clustering-pic">
power iteration clustering (PIC)
</a>
</li>
<li>
<a href="mllib-clustering.html#latent-dirichlet-allocation-lda">
latent Dirichlet allocation (LDA)
</a>
</li>
<li>
<a href="mllib-clustering.html#bisecting-k-means">
bisecting k-means
</a>
</li>
<li>
<a href="mllib-clustering.html#streaming-k-means">
streaming k-means
</a>
</li>
</ul>
</li>
<li>
<a href="mllib-dimensionality-reduction.html">
Dimensionality reduction
</a>
</li>
<li>
<a href="mllib-feature-extraction.html">
Feature extraction and transformation
</a>
</li>
<li>
<a href="mllib-frequent-pattern-mining.html">
Frequent pattern mining
</a>
</li>
<li>
<a href="mllib-evaluation-metrics.html">
Evaluation metrics
</a>
</li>
<li>
<a href="mllib-pmml-model-export.html">
PMML model export
</a>
</li>
<li>
<a href="mllib-optimization.html">
Optimization (developer)
</a>
</li>
</ul>
</div>
</div>
<input id="nav-trigger" class="nav-trigger" checked type="checkbox">
<label for="nav-trigger"></label>
<div class="content-with-sidebar mr-3" id="content">
<h1 class="title">Clustering - RDD-based API</h1>
<p><a href="https://en.wikipedia.org/wiki/Cluster_analysis">Clustering</a> is an unsupervised learning problem whereby we aim to group subsets
of entities with one another based on some notion of similarity. Clustering is
often used for exploratory analysis and/or as a component of a hierarchical
<a href="https://en.wikipedia.org/wiki/Supervised_learning">supervised learning</a> pipeline (in which distinct classifiers or regression
models are trained for each cluster).</p>
<p>The <code class="language-plaintext highlighter-rouge">spark.mllib</code> package supports the following models:</p>
<ul id="markdown-toc">
<li><a href="#k-means" id="markdown-toc-k-means">K-means</a></li>
<li><a href="#gaussian-mixture" id="markdown-toc-gaussian-mixture">Gaussian mixture</a></li>
<li><a href="#power-iteration-clustering-pic" id="markdown-toc-power-iteration-clustering-pic">Power iteration clustering (PIC)</a></li>
<li><a href="#latent-dirichlet-allocation-lda" id="markdown-toc-latent-dirichlet-allocation-lda">Latent Dirichlet allocation (LDA)</a></li>
<li><a href="#bisecting-k-means" id="markdown-toc-bisecting-k-means">Bisecting k-means</a></li>
<li><a href="#streaming-k-means" id="markdown-toc-streaming-k-means">Streaming k-means</a></li>
</ul>
<h2 id="k-means">K-means</h2>
<p><a href="http://en.wikipedia.org/wiki/K-means_clustering">K-means</a> is one of the
most commonly used clustering algorithms, grouping data points into a
predefined number of clusters. The <code class="language-plaintext highlighter-rouge">spark.mllib</code> implementation includes a parallelized
variant of the <a href="http://en.wikipedia.org/wiki/K-means%2B%2B">k-means++</a> method
called <a href="http://theory.stanford.edu/~sergei/papers/vldb12-kmpar.pdf">kmeans||</a>.
The implementation in <code class="language-plaintext highlighter-rouge">spark.mllib</code> has the following parameters:</p>
<ul>
<li><em>k</em> is the number of desired clusters. Note that it is possible for fewer than k clusters to be returned, for example, if there are fewer than k distinct points to cluster.</li>
<li><em>maxIterations</em> is the maximum number of iterations to run.</li>
<li><em>initializationMode</em> specifies either random initialization or
initialization via k-means||.</li>
<li><em>runs</em> has had no effect since Spark 2.0.0.</li>
<li><em>initializationSteps</em> determines the number of steps in the k-means|| algorithm.</li>
<li><em>epsilon</em> determines the distance threshold within which we consider k-means to have converged.</li>
<li><em>initialModel</em> is an optional set of cluster centers used for initialization. If this parameter is supplied, only one run is performed.</li>
</ul>
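<p>To make these concrete, the following minimal sketch (the toy data and settings are placeholders, and it assumes the PySpark shell where <code class="language-plaintext highlighter-rouge">sc</code> is predefined) passes each parameter as a keyword argument to <code class="language-plaintext highlighter-rouge">KMeans.train</code>:</p>
<div class="highlight"><pre class="codehilite"><code>from numpy import array
from pyspark.mllib.clustering import KMeans

# Four toy 2-D points forming two obvious clusters
points = sc.parallelize([array([0.0, 0.0]), array([0.1, 0.1]),
                         array([9.0, 9.0]), array([9.1, 9.1])])

model = KMeans.train(points,
                     k=2,                             # desired number of clusters
                     maxIterations=20,                # cap on the number of iterations
                     initializationMode="k-means||",  # or "random"
                     initializationSteps=2,           # steps of the k-means|| init
                     epsilon=1e-4)                    # convergence distance threshold
print(model.clusterCenters)</code></pre></div>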
<p><strong>Examples</strong></p>
<div class="codetabs">
<div data-lang="python">
<p>The following examples can be tested in the PySpark shell.</p>
<p>In the following example, after loading and parsing the data, we use the KMeans object to cluster the
data into two clusters. The number of desired clusters is passed to the algorithm. We then compute
Within Set Sum of Squared Error (WSSSE). You can reduce this error measure by increasing <em>k</em>. In
fact, the optimal <em>k</em> is usually one where there is an &#8220;elbow&#8221; in the WSSSE graph.</p>
<p>Refer to the <a href="api/python/reference/api/pyspark.mllib.clustering.KMeans.html"><code class="language-plaintext highlighter-rouge">KMeans</code> Python docs</a> and <a href="api/python/reference/api/pyspark.mllib.clustering.KMeansModel.html"><code class="language-plaintext highlighter-rouge">KMeansModel</code> Python docs</a> for more details on the API.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">from</span> <span class="nn">numpy</span> <span class="kn">import</span> <span class="n">array</span>
<span class="kn">from</span> <span class="nn">math</span> <span class="kn">import</span> <span class="n">sqrt</span>
<span class="kn">from</span> <span class="nn">pyspark.mllib.clustering</span> <span class="kn">import</span> <span class="n">KMeans</span><span class="p">,</span> <span class="n">KMeansModel</span>
<span class="c1"># Load and parse the data
</span><span class="n">data</span> <span class="o">=</span> <span class="n">sc</span><span class="p">.</span><span class="n">textFile</span><span class="p">(</span><span class="s">"data/mllib/kmeans_data.txt"</span><span class="p">)</span>
<span class="n">parsedData</span> <span class="o">=</span> <span class="n">data</span><span class="p">.</span><span class="nb">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">line</span><span class="p">:</span> <span class="n">array</span><span class="p">([</span><span class="nb">float</span><span class="p">(</span><span class="n">x</span><span class="p">)</span> <span class="k">for</span> <span class="n">x</span> <span class="ow">in</span> <span class="n">line</span><span class="p">.</span><span class="n">split</span><span class="p">(</span><span class="s">' '</span><span class="p">)]))</span>
<span class="c1"># Build the model (cluster the data)
</span><span class="n">clusters</span> <span class="o">=</span> <span class="n">KMeans</span><span class="p">.</span><span class="n">train</span><span class="p">(</span><span class="n">parsedData</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="n">maxIterations</span><span class="o">=</span><span class="mi">10</span><span class="p">,</span> <span class="n">initializationMode</span><span class="o">=</span><span class="s">"random"</span><span class="p">)</span>
<span class="c1"># Evaluate clustering by computing Within Set Sum of Squared Errors
</span><span class="k">def</span> <span class="nf">error</span><span class="p">(</span><span class="n">point</span><span class="p">):</span>
<span class="n">center</span> <span class="o">=</span> <span class="n">clusters</span><span class="p">.</span><span class="n">centers</span><span class="p">[</span><span class="n">clusters</span><span class="p">.</span><span class="n">predict</span><span class="p">(</span><span class="n">point</span><span class="p">)]</span>
<span class="k">return</span> <span class="n">sqrt</span><span class="p">(</span><span class="nb">sum</span><span class="p">([</span><span class="n">x</span><span class="o">**</span><span class="mi">2</span> <span class="k">for</span> <span class="n">x</span> <span class="ow">in</span> <span class="p">(</span><span class="n">point</span> <span class="o">-</span> <span class="n">center</span><span class="p">)]))</span>
<span class="n">WSSSE</span> <span class="o">=</span> <span class="n">parsedData</span><span class="p">.</span><span class="nb">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">point</span><span class="p">:</span> <span class="n">error</span><span class="p">(</span><span class="n">point</span><span class="p">)).</span><span class="nb">reduce</span><span class="p">(</span><span class="k">lambda</span> <span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">:</span> <span class="n">x</span> <span class="o">+</span> <span class="n">y</span><span class="p">)</span>
<span class="k">print</span><span class="p">(</span><span class="s">"Within Set Sum of Squared Error = "</span> <span class="o">+</span> <span class="nb">str</span><span class="p">(</span><span class="n">WSSSE</span><span class="p">))</span>
<span class="c1"># Save and load model
</span><span class="n">clusters</span><span class="p">.</span><span class="n">save</span><span class="p">(</span><span class="n">sc</span><span class="p">,</span> <span class="s">"target/org/apache/spark/PythonKMeansExample/KMeansModel"</span><span class="p">)</span>
<span class="n">sameModel</span> <span class="o">=</span> <span class="n">KMeansModel</span><span class="p">.</span><span class="n">load</span><span class="p">(</span><span class="n">sc</span><span class="p">,</span> <span class="s">"target/org/apache/spark/PythonKMeansExample/KMeansModel"</span><span class="p">)</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/python/mllib/k_means_example.py" in the Spark repo.</small></div>
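<p>To locate the &#8220;elbow&#8221; mentioned above, one common approach (a sketch reusing <code class="language-plaintext highlighter-rouge">parsedData</code> and <code class="language-plaintext highlighter-rouge">KMeans</code> from the example) is to train a model per candidate <em>k</em> and watch where the cost stops dropping sharply:</p>
<div class="highlight"><pre class="codehilite"><code># Sweep k and print the clustering cost (WSSSE) for each value;
# the k beyond which the cost barely improves marks the "elbow".
for k in range(2, 7):
    model = KMeans.train(parsedData, k, maxIterations=10)
    print(k, model.computeCost(parsedData))</code></pre></div>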
</div>
<div data-lang="scala">
<p>The following code snippets can be executed in <code class="language-plaintext highlighter-rouge">spark-shell</code>.</p>
<p>In the following example, after loading and parsing the data, we use the
<a href="api/scala/org/apache/spark/mllib/clustering/KMeans.html"><code class="language-plaintext highlighter-rouge">KMeans</code></a> object to cluster the data
into two clusters. The number of desired clusters is passed to the algorithm. We then compute Within
Set Sum of Squared Error (WSSSE). You can reduce this error measure by increasing <em>k</em>. In fact, the
optimal <em>k</em> is usually one where there is an &#8220;elbow&#8221; in the WSSSE graph.</p>
<p>Refer to the <a href="api/scala/org/apache/spark/mllib/clustering/KMeans.html"><code class="language-plaintext highlighter-rouge">KMeans</code> Scala docs</a> and <a href="api/scala/org/apache/spark/mllib/clustering/KMeansModel.html"><code class="language-plaintext highlighter-rouge">KMeansModel</code> Scala docs</a> for details on the API.</p>
<div class="highlight"><pre class="codehilite"><code><span class="k">import</span> <span class="nn">org.apache.spark.mllib.clustering.</span><span class="o">{</span><span class="nc">KMeans</span><span class="o">,</span> <span class="nc">KMeansModel</span><span class="o">}</span>
<span class="k">import</span> <span class="nn">org.apache.spark.mllib.linalg.Vectors</span>
<span class="c1">// Load and parse the data</span>
<span class="k">val</span> <span class="nv">data</span> <span class="k">=</span> <span class="nv">sc</span><span class="o">.</span><span class="py">textFile</span><span class="o">(</span><span class="s">"data/mllib/kmeans_data.txt"</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">parsedData</span> <span class="k">=</span> <span class="nv">data</span><span class="o">.</span><span class="py">map</span><span class="o">(</span><span class="n">s</span> <span class="k">=&gt;</span> <span class="nv">Vectors</span><span class="o">.</span><span class="py">dense</span><span class="o">(</span><span class="nv">s</span><span class="o">.</span><span class="py">split</span><span class="o">(</span><span class="sc">' '</span><span class="o">).</span><span class="py">map</span><span class="o">(</span><span class="nv">_</span><span class="o">.</span><span class="py">toDouble</span><span class="o">))).</span><span class="py">cache</span><span class="o">()</span>
<span class="c1">// Cluster the data into two classes using KMeans</span>
<span class="k">val</span> <span class="nv">numClusters</span> <span class="k">=</span> <span class="mi">2</span>
<span class="k">val</span> <span class="nv">numIterations</span> <span class="k">=</span> <span class="mi">20</span>
<span class="k">val</span> <span class="nv">clusters</span> <span class="k">=</span> <span class="nv">KMeans</span><span class="o">.</span><span class="py">train</span><span class="o">(</span><span class="n">parsedData</span><span class="o">,</span> <span class="n">numClusters</span><span class="o">,</span> <span class="n">numIterations</span><span class="o">)</span>
<span class="c1">// Evaluate clustering by computing Within Set Sum of Squared Errors</span>
<span class="k">val</span> <span class="nv">WSSSE</span> <span class="k">=</span> <span class="nv">clusters</span><span class="o">.</span><span class="py">computeCost</span><span class="o">(</span><span class="n">parsedData</span><span class="o">)</span>
<span class="nf">println</span><span class="o">(</span><span class="n">s</span><span class="s">"Within Set Sum of Squared Errors = $WSSSE"</span><span class="o">)</span>
<span class="c1">// Save and load model</span>
<span class="nv">clusters</span><span class="o">.</span><span class="py">save</span><span class="o">(</span><span class="n">sc</span><span class="o">,</span> <span class="s">"target/org/apache/spark/KMeansExample/KMeansModel"</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">sameModel</span> <span class="k">=</span> <span class="nv">KMeansModel</span><span class="o">.</span><span class="py">load</span><span class="o">(</span><span class="n">sc</span><span class="o">,</span> <span class="s">"target/org/apache/spark/KMeansExample/KMeansModel"</span><span class="o">)</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/scala/org/apache/spark/examples/mllib/KMeansExample.scala" in the Spark repo.</small></div>
</div>
<div data-lang="java">
<p>All of MLlib&#8217;s methods use Java-friendly types, so you can import and call them from Java the same
way you do in Scala. The only caveat is that the methods take Scala RDD objects, while the
Spark Java API uses a separate <code class="language-plaintext highlighter-rouge">JavaRDD</code> class. You can convert a Java RDD to a Scala one by
calling <code class="language-plaintext highlighter-rouge">.rdd()</code> on your <code class="language-plaintext highlighter-rouge">JavaRDD</code> object. A self-contained application example
that is equivalent to the provided example in Scala is given below:</p>
<p>Refer to the <a href="api/java/org/apache/spark/mllib/clustering/KMeans.html"><code class="language-plaintext highlighter-rouge">KMeans</code> Java docs</a> and <a href="api/java/org/apache/spark/mllib/clustering/KMeansModel.html"><code class="language-plaintext highlighter-rouge">KMeansModel</code> Java docs</a> for details on the API.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">import</span> <span class="nn">org.apache.spark.api.java.JavaRDD</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.mllib.clustering.KMeans</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.mllib.clustering.KMeansModel</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.mllib.linalg.Vector</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.mllib.linalg.Vectors</span><span class="o">;</span>
<span class="c1">// Load and parse data</span>
<span class="nc">String</span> <span class="n">path</span> <span class="o">=</span> <span class="s">"data/mllib/kmeans_data.txt"</span><span class="o">;</span>
<span class="nc">JavaRDD</span><span class="o">&lt;</span><span class="nc">String</span><span class="o">&gt;</span> <span class="n">data</span> <span class="o">=</span> <span class="n">jsc</span><span class="o">.</span><span class="na">textFile</span><span class="o">(</span><span class="n">path</span><span class="o">);</span>
<span class="nc">JavaRDD</span><span class="o">&lt;</span><span class="nc">Vector</span><span class="o">&gt;</span> <span class="n">parsedData</span> <span class="o">=</span> <span class="n">data</span><span class="o">.</span><span class="na">map</span><span class="o">(</span><span class="n">s</span> <span class="o">-&gt;</span> <span class="o">{</span>
<span class="nc">String</span><span class="o">[]</span> <span class="n">sarray</span> <span class="o">=</span> <span class="n">s</span><span class="o">.</span><span class="na">split</span><span class="o">(</span><span class="s">" "</span><span class="o">);</span>
<span class="kt">double</span><span class="o">[]</span> <span class="n">values</span> <span class="o">=</span> <span class="k">new</span> <span class="kt">double</span><span class="o">[</span><span class="n">sarray</span><span class="o">.</span><span class="na">length</span><span class="o">];</span>
<span class="k">for</span> <span class="o">(</span><span class="kt">int</span> <span class="n">i</span> <span class="o">=</span> <span class="mi">0</span><span class="o">;</span> <span class="n">i</span> <span class="o">&lt;</span> <span class="n">sarray</span><span class="o">.</span><span class="na">length</span><span class="o">;</span> <span class="n">i</span><span class="o">++)</span> <span class="o">{</span>
<span class="n">values</span><span class="o">[</span><span class="n">i</span><span class="o">]</span> <span class="o">=</span> <span class="nc">Double</span><span class="o">.</span><span class="na">parseDouble</span><span class="o">(</span><span class="n">sarray</span><span class="o">[</span><span class="n">i</span><span class="o">]);</span>
<span class="o">}</span>
<span class="k">return</span> <span class="nc">Vectors</span><span class="o">.</span><span class="na">dense</span><span class="o">(</span><span class="n">values</span><span class="o">);</span>
<span class="o">});</span>
<span class="n">parsedData</span><span class="o">.</span><span class="na">cache</span><span class="o">();</span>
<span class="c1">// Cluster the data into two classes using KMeans</span>
<span class="kt">int</span> <span class="n">numClusters</span> <span class="o">=</span> <span class="mi">2</span><span class="o">;</span>
<span class="kt">int</span> <span class="n">numIterations</span> <span class="o">=</span> <span class="mi">20</span><span class="o">;</span>
<span class="nc">KMeansModel</span> <span class="n">clusters</span> <span class="o">=</span> <span class="nc">KMeans</span><span class="o">.</span><span class="na">train</span><span class="o">(</span><span class="n">parsedData</span><span class="o">.</span><span class="na">rdd</span><span class="o">(),</span> <span class="n">numClusters</span><span class="o">,</span> <span class="n">numIterations</span><span class="o">);</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="s">"Cluster centers:"</span><span class="o">);</span>
<span class="k">for</span> <span class="o">(</span><span class="nc">Vector</span> <span class="nl">center:</span> <span class="n">clusters</span><span class="o">.</span><span class="na">clusterCenters</span><span class="o">())</span> <span class="o">{</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="s">" "</span> <span class="o">+</span> <span class="n">center</span><span class="o">);</span>
<span class="o">}</span>
<span class="kt">double</span> <span class="n">cost</span> <span class="o">=</span> <span class="n">clusters</span><span class="o">.</span><span class="na">computeCost</span><span class="o">(</span><span class="n">parsedData</span><span class="o">.</span><span class="na">rdd</span><span class="o">());</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="s">"Cost: "</span> <span class="o">+</span> <span class="n">cost</span><span class="o">);</span>
<span class="c1">// Evaluate clustering by computing Within Set Sum of Squared Errors</span>
<span class="kt">double</span> <span class="no">WSSSE</span> <span class="o">=</span> <span class="n">clusters</span><span class="o">.</span><span class="na">computeCost</span><span class="o">(</span><span class="n">parsedData</span><span class="o">.</span><span class="na">rdd</span><span class="o">());</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="s">"Within Set Sum of Squared Errors = "</span> <span class="o">+</span> <span class="no">WSSSE</span><span class="o">);</span>
<span class="c1">// Save and load model</span>
<span class="n">clusters</span><span class="o">.</span><span class="na">save</span><span class="o">(</span><span class="n">jsc</span><span class="o">.</span><span class="na">sc</span><span class="o">(),</span> <span class="s">"target/org/apache/spark/JavaKMeansExample/KMeansModel"</span><span class="o">);</span>
<span class="nc">KMeansModel</span> <span class="n">sameModel</span> <span class="o">=</span> <span class="nc">KMeansModel</span><span class="o">.</span><span class="na">load</span><span class="o">(</span><span class="n">jsc</span><span class="o">.</span><span class="na">sc</span><span class="o">(),</span>
<span class="s">"target/org/apache/spark/JavaKMeansExample/KMeansModel"</span><span class="o">);</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/java/org/apache/spark/examples/mllib/JavaKMeansExample.java" in the Spark repo.</small></div>
</div>
</div>
<h2 id="gaussian-mixture">Gaussian mixture</h2>
<p>A <a href="http://en.wikipedia.org/wiki/Mixture_model#Multivariate_Gaussian_mixture_model">Gaussian Mixture Model</a>
represents a composite distribution whereby points are drawn from one of <em>k</em> Gaussian sub-distributions,
each with its own probability. The <code class="language-plaintext highlighter-rouge">spark.mllib</code> implementation uses the
<a href="http://en.wikipedia.org/wiki/Expectation%E2%80%93maximization_algorithm">expectation-maximization</a>
algorithm to induce the maximum-likelihood model given a set of samples. The implementation
has the following parameters:</p>
<ul>
<li><em>k</em> is the number of desired clusters.</li>
<li><em>convergenceTol</em> is the maximum change in log-likelihood at which we consider convergence achieved.</li>
<li><em>maxIterations</em> is the maximum number of iterations to perform without reaching convergence.</li>
<li><em>initialModel</em> is an optional starting point from which to start the EM algorithm. If this parameter is omitted, a random starting point will be constructed from the data.</li>
</ul>
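<p>As with k-means, these map directly onto keyword arguments of <code class="language-plaintext highlighter-rouge">GaussianMixture.train</code> in PySpark. A minimal sketch (the RDD and parameter values are placeholders):</p>
<div class="highlight"><pre class="codehilite"><code>from pyspark.mllib.clustering import GaussianMixture

# parsedData: an RDD of feature vectors, parsed as in the examples below
gmm = GaussianMixture.train(parsedData,
                            k=2,                  # number of Gaussian components
                            convergenceTol=1e-3,  # stop once the log-likelihood change is below this
                            maxIterations=100,    # cap on EM iterations
                            seed=50)              # optional; fixes the random starting point</code></pre></div>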
<p><strong>Examples</strong></p>
<div class="codetabs">
<div data-lang="python">
<p>In the following example, after loading and parsing the data, we use a
<a href="api/python/reference/api/pyspark.mllib.clustering.GaussianMixture.html">GaussianMixture</a>
object to cluster the data into two clusters. The number of desired clusters is passed
to the algorithm. We then output the parameters of the mixture model.</p>
<p>Refer to the <a href="api/python/reference/api/pyspark.mllib.clustering.GaussianMixture.html"><code class="language-plaintext highlighter-rouge">GaussianMixture</code> Python docs</a> and <a href="api/python/reference/api/pyspark.mllib.clustering.GaussianMixtureModel.html"><code class="language-plaintext highlighter-rouge">GaussianMixtureModel</code> Python docs</a> for more details on the API.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">from</span> <span class="nn">numpy</span> <span class="kn">import</span> <span class="n">array</span>
<span class="kn">from</span> <span class="nn">pyspark.mllib.clustering</span> <span class="kn">import</span> <span class="n">GaussianMixture</span><span class="p">,</span> <span class="n">GaussianMixtureModel</span>
<span class="c1"># Load and parse the data
</span><span class="n">data</span> <span class="o">=</span> <span class="n">sc</span><span class="p">.</span><span class="n">textFile</span><span class="p">(</span><span class="s">"data/mllib/gmm_data.txt"</span><span class="p">)</span>
<span class="n">parsedData</span> <span class="o">=</span> <span class="n">data</span><span class="p">.</span><span class="nb">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">line</span><span class="p">:</span> <span class="n">array</span><span class="p">([</span><span class="nb">float</span><span class="p">(</span><span class="n">x</span><span class="p">)</span> <span class="k">for</span> <span class="n">x</span> <span class="ow">in</span> <span class="n">line</span><span class="p">.</span><span class="n">strip</span><span class="p">().</span><span class="n">split</span><span class="p">(</span><span class="s">' '</span><span class="p">)]))</span>
<span class="c1"># Build the model (cluster the data)
</span><span class="n">gmm</span> <span class="o">=</span> <span class="n">GaussianMixture</span><span class="p">.</span><span class="n">train</span><span class="p">(</span><span class="n">parsedData</span><span class="p">,</span> <span class="mi">2</span><span class="p">)</span>
<span class="c1"># Save and load model
</span><span class="n">gmm</span><span class="p">.</span><span class="n">save</span><span class="p">(</span><span class="n">sc</span><span class="p">,</span> <span class="s">"target/org/apache/spark/PythonGaussianMixtureExample/GaussianMixtureModel"</span><span class="p">)</span>
<span class="n">sameModel</span> <span class="o">=</span> <span class="n">GaussianMixtureModel</span>\
<span class="p">.</span><span class="n">load</span><span class="p">(</span><span class="n">sc</span><span class="p">,</span> <span class="s">"target/org/apache/spark/PythonGaussianMixtureExample/GaussianMixtureModel"</span><span class="p">)</span>
<span class="c1"># output parameters of model
</span><span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">2</span><span class="p">):</span>
<span class="k">print</span><span class="p">(</span><span class="s">"weight = "</span><span class="p">,</span> <span class="n">gmm</span><span class="p">.</span><span class="n">weights</span><span class="p">[</span><span class="n">i</span><span class="p">],</span> <span class="s">"mu = "</span><span class="p">,</span> <span class="n">gmm</span><span class="p">.</span><span class="n">gaussians</span><span class="p">[</span><span class="n">i</span><span class="p">].</span><span class="n">mu</span><span class="p">,</span>
<span class="s">"sigma = "</span><span class="p">,</span> <span class="n">gmm</span><span class="p">.</span><span class="n">gaussians</span><span class="p">[</span><span class="n">i</span><span class="p">].</span><span class="n">sigma</span><span class="p">.</span><span class="n">toArray</span><span class="p">())</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/python/mllib/gaussian_mixture_example.py" in the Spark repo.</small></div>
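<p>Beyond inspecting its parameters, the trained model can also score points. A short sketch (reusing <code class="language-plaintext highlighter-rouge">gmm</code> and <code class="language-plaintext highlighter-rouge">parsedData</code> from the example above):</p>
<div class="highlight"><pre class="codehilite"><code># Hard cluster assignments and per-component membership probabilities
labels = gmm.predict(parsedData).collect()
soft = gmm.predictSoft(parsedData).collect()
print(labels[0], soft[0])  # most likely component, and the full probability vector</code></pre></div>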
</div>
<div data-lang="scala">
<p>In the following example, after loading and parsing the data, we use a
<a href="api/scala/org/apache/spark/mllib/clustering/GaussianMixture.html">GaussianMixture</a>
object to cluster the data into two clusters. The number of desired clusters is passed
to the algorithm. We then output the parameters of the mixture model.</p>
<p>Refer to the <a href="api/scala/org/apache/spark/mllib/clustering/GaussianMixture.html"><code class="language-plaintext highlighter-rouge">GaussianMixture</code> Scala docs</a> and <a href="api/scala/org/apache/spark/mllib/clustering/GaussianMixtureModel.html"><code class="language-plaintext highlighter-rouge">GaussianMixtureModel</code> Scala docs</a> for details on the API.</p>
<div class="highlight"><pre class="codehilite"><code><span class="k">import</span> <span class="nn">org.apache.spark.mllib.clustering.</span><span class="o">{</span><span class="nc">GaussianMixture</span><span class="o">,</span> <span class="nc">GaussianMixtureModel</span><span class="o">}</span>
<span class="k">import</span> <span class="nn">org.apache.spark.mllib.linalg.Vectors</span>
<span class="c1">// Load and parse the data</span>
<span class="k">val</span> <span class="nv">data</span> <span class="k">=</span> <span class="nv">sc</span><span class="o">.</span><span class="py">textFile</span><span class="o">(</span><span class="s">"data/mllib/gmm_data.txt"</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">parsedData</span> <span class="k">=</span> <span class="nv">data</span><span class="o">.</span><span class="py">map</span><span class="o">(</span><span class="n">s</span> <span class="k">=&gt;</span> <span class="nv">Vectors</span><span class="o">.</span><span class="py">dense</span><span class="o">(</span><span class="nv">s</span><span class="o">.</span><span class="py">trim</span><span class="o">.</span><span class="py">split</span><span class="o">(</span><span class="sc">' '</span><span class="o">).</span><span class="py">map</span><span class="o">(</span><span class="nv">_</span><span class="o">.</span><span class="py">toDouble</span><span class="o">))).</span><span class="py">cache</span><span class="o">()</span>
<span class="c1">// Cluster the data into two classes using GaussianMixture</span>
<span class="k">val</span> <span class="nv">gmm</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">GaussianMixture</span><span class="o">().</span><span class="py">setK</span><span class="o">(</span><span class="mi">2</span><span class="o">).</span><span class="py">run</span><span class="o">(</span><span class="n">parsedData</span><span class="o">)</span>
<span class="c1">// Save and load model</span>
<span class="nv">gmm</span><span class="o">.</span><span class="py">save</span><span class="o">(</span><span class="n">sc</span><span class="o">,</span> <span class="s">"target/org/apache/spark/GaussianMixtureExample/GaussianMixtureModel"</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">sameModel</span> <span class="k">=</span> <span class="nv">GaussianMixtureModel</span><span class="o">.</span><span class="py">load</span><span class="o">(</span><span class="n">sc</span><span class="o">,</span>
<span class="s">"target/org/apache/spark/GaussianMixtureExample/GaussianMixtureModel"</span><span class="o">)</span>
<span class="c1">// output parameters of max-likelihood model</span>
<span class="nf">for</span> <span class="o">(</span><span class="n">i</span> <span class="k">&lt;-</span> <span class="mi">0</span> <span class="n">until</span> <span class="nv">gmm</span><span class="o">.</span><span class="py">k</span><span class="o">)</span> <span class="o">{</span>
<span class="nf">println</span><span class="o">(</span><span class="s">"weight=%f\nmu=%s\nsigma=\n%s\n"</span> <span class="nf">format</span>
<span class="o">(</span><span class="nv">gmm</span><span class="o">.</span><span class="py">weights</span><span class="o">(</span><span class="n">i</span><span class="o">),</span> <span class="nv">gmm</span><span class="o">.</span><span class="py">gaussians</span><span class="o">(</span><span class="n">i</span><span class="o">).</span><span class="py">mu</span><span class="o">,</span> <span class="nv">gmm</span><span class="o">.</span><span class="py">gaussians</span><span class="o">(</span><span class="n">i</span><span class="o">).</span><span class="py">sigma</span><span class="o">))</span>
<span class="o">}</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/scala/org/apache/spark/examples/mllib/GaussianMixtureExample.scala" in the Spark repo.</small></div>
</div>
<div data-lang="java">
<p>All of MLlib&#8217;s methods use Java-friendly types, so you can import and call them from Java the same
way you do in Scala. The only caveat is that the methods take Scala RDD objects, while the
Spark Java API uses a separate <code class="language-plaintext highlighter-rouge">JavaRDD</code> class. You can convert a Java RDD to a Scala one by
calling <code class="language-plaintext highlighter-rouge">.rdd()</code> on your <code class="language-plaintext highlighter-rouge">JavaRDD</code> object. A self-contained application example
that is equivalent to the provided example in Scala is given below:</p>
<p>Refer to the <a href="api/java/org/apache/spark/mllib/clustering/GaussianMixture.html"><code class="language-plaintext highlighter-rouge">GaussianMixture</code> Java docs</a> and <a href="api/java/org/apache/spark/mllib/clustering/GaussianMixtureModel.html"><code class="language-plaintext highlighter-rouge">GaussianMixtureModel</code> Java docs</a> for details on the API.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">import</span> <span class="nn">org.apache.spark.api.java.JavaRDD</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.mllib.clustering.GaussianMixture</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.mllib.clustering.GaussianMixtureModel</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.mllib.linalg.Vector</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.mllib.linalg.Vectors</span><span class="o">;</span>
<span class="c1">// Load and parse data</span>
<span class="nc">String</span> <span class="n">path</span> <span class="o">=</span> <span class="s">"data/mllib/gmm_data.txt"</span><span class="o">;</span>
<span class="nc">JavaRDD</span><span class="o">&lt;</span><span class="nc">String</span><span class="o">&gt;</span> <span class="n">data</span> <span class="o">=</span> <span class="n">jsc</span><span class="o">.</span><span class="na">textFile</span><span class="o">(</span><span class="n">path</span><span class="o">);</span>
<span class="nc">JavaRDD</span><span class="o">&lt;</span><span class="nc">Vector</span><span class="o">&gt;</span> <span class="n">parsedData</span> <span class="o">=</span> <span class="n">data</span><span class="o">.</span><span class="na">map</span><span class="o">(</span><span class="n">s</span> <span class="o">-&gt;</span> <span class="o">{</span>
<span class="nc">String</span><span class="o">[]</span> <span class="n">sarray</span> <span class="o">=</span> <span class="n">s</span><span class="o">.</span><span class="na">trim</span><span class="o">().</span><span class="na">split</span><span class="o">(</span><span class="s">" "</span><span class="o">);</span>
<span class="kt">double</span><span class="o">[]</span> <span class="n">values</span> <span class="o">=</span> <span class="k">new</span> <span class="kt">double</span><span class="o">[</span><span class="n">sarray</span><span class="o">.</span><span class="na">length</span><span class="o">];</span>
<span class="k">for</span> <span class="o">(</span><span class="kt">int</span> <span class="n">i</span> <span class="o">=</span> <span class="mi">0</span><span class="o">;</span> <span class="n">i</span> <span class="o">&lt;</span> <span class="n">sarray</span><span class="o">.</span><span class="na">length</span><span class="o">;</span> <span class="n">i</span><span class="o">++)</span> <span class="o">{</span>
<span class="n">values</span><span class="o">[</span><span class="n">i</span><span class="o">]</span> <span class="o">=</span> <span class="nc">Double</span><span class="o">.</span><span class="na">parseDouble</span><span class="o">(</span><span class="n">sarray</span><span class="o">[</span><span class="n">i</span><span class="o">]);</span>
<span class="o">}</span>
<span class="k">return</span> <span class="nc">Vectors</span><span class="o">.</span><span class="na">dense</span><span class="o">(</span><span class="n">values</span><span class="o">);</span>
<span class="o">});</span>
<span class="n">parsedData</span><span class="o">.</span><span class="na">cache</span><span class="o">();</span>
<span class="c1">// Cluster the data into two classes using GaussianMixture</span>
<span class="nc">GaussianMixtureModel</span> <span class="n">gmm</span> <span class="o">=</span> <span class="k">new</span> <span class="nc">GaussianMixture</span><span class="o">().</span><span class="na">setK</span><span class="o">(</span><span class="mi">2</span><span class="o">).</span><span class="na">run</span><span class="o">(</span><span class="n">parsedData</span><span class="o">.</span><span class="na">rdd</span><span class="o">());</span>
<span class="c1">// Save and load GaussianMixtureModel</span>
<span class="n">gmm</span><span class="o">.</span><span class="na">save</span><span class="o">(</span><span class="n">jsc</span><span class="o">.</span><span class="na">sc</span><span class="o">(),</span> <span class="s">"target/org/apache/spark/JavaGaussianMixtureExample/GaussianMixtureModel"</span><span class="o">);</span>
<span class="nc">GaussianMixtureModel</span> <span class="n">sameModel</span> <span class="o">=</span> <span class="nc">GaussianMixtureModel</span><span class="o">.</span><span class="na">load</span><span class="o">(</span><span class="n">jsc</span><span class="o">.</span><span class="na">sc</span><span class="o">(),</span>
<span class="s">"target/org.apache.spark.JavaGaussianMixtureExample/GaussianMixtureModel"</span><span class="o">);</span>
<span class="c1">// Output the parameters of the mixture model</span>
<span class="k">for</span> <span class="o">(</span><span class="kt">int</span> <span class="n">j</span> <span class="o">=</span> <span class="mi">0</span><span class="o">;</span> <span class="n">j</span> <span class="o">&lt;</span> <span class="n">gmm</span><span class="o">.</span><span class="na">k</span><span class="o">();</span> <span class="n">j</span><span class="o">++)</span> <span class="o">{</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">printf</span><span class="o">(</span><span class="s">"weight=%f\nmu=%s\nsigma=\n%s\n"</span><span class="o">,</span>
<span class="n">gmm</span><span class="o">.</span><span class="na">weights</span><span class="o">()[</span><span class="n">j</span><span class="o">],</span> <span class="n">gmm</span><span class="o">.</span><span class="na">gaussians</span><span class="o">()[</span><span class="n">j</span><span class="o">].</span><span class="na">mu</span><span class="o">(),</span> <span class="n">gmm</span><span class="o">.</span><span class="na">gaussians</span><span class="o">()[</span><span class="n">j</span><span class="o">].</span><span class="na">sigma</span><span class="o">());</span>
<span class="o">}</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/java/org/apache/spark/examples/mllib/JavaGaussianMixtureExample.java" in the Spark repo.</small></div>
</div>
</div>
<h2 id="power-iteration-clustering-pic">Power iteration clustering (PIC)</h2>
<p>Power iteration clustering (PIC) is a scalable and efficient algorithm for clustering vertices of a
graph given pairwise similarities as edge properties,
described in <a href="http://www.cs.cmu.edu/~frank/papers/icml2010-pic-final.pdf">Lin and Cohen, Power Iteration Clustering</a>.
It computes a pseudo-eigenvector of the normalized affinity matrix of the graph via
<a href="http://en.wikipedia.org/wiki/Power_iteration">power iteration</a> and uses it to cluster vertices.
<code class="language-plaintext highlighter-rouge">spark.mllib</code> includes an implementation of PIC using GraphX as its backend.
It takes an <code class="language-plaintext highlighter-rouge">RDD</code> of <code class="language-plaintext highlighter-rouge">(srcId, dstId, similarity)</code> tuples and outputs a model with the clustering assignments.
The similarities must be nonnegative.
PIC assumes that the similarity measure is symmetric.
A pair <code class="language-plaintext highlighter-rouge">(srcId, dstId)</code>, regardless of ordering, should appear at most once in the input data.
If a pair is missing from the input, its similarity is treated as zero.
<code class="language-plaintext highlighter-rouge">spark.mllib</code>&#8217;s PIC implementation takes the following (hyper-)parameters:</p>
<ul>
<li><code class="language-plaintext highlighter-rouge">k</code>: number of clusters</li>
<li><code class="language-plaintext highlighter-rouge">maxIterations</code>: maximum number of power iterations</li>
<li><code class="language-plaintext highlighter-rouge">initializationMode</code>: initialization model. This can be either &#8220;random&#8221;, which is the default,
to use a random vector as vertex properties, or &#8220;degree&#8221; to use normalized sum similarities.</li>
</ul>
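<p>Before the full examples, here is a minimal sketch of the input contract (the tiny graph and its weights are made-up placeholders): each undirected pair appears exactly once, with a nonnegative similarity, and unlisted pairs are treated as zero.</p>
<div class="highlight"><pre class="codehilite"><code>from pyspark.mllib.clustering import PowerIterationClustering

# Hypothetical 4-vertex graph: (0, 1) and (2, 3) are strongly similar,
# joined by a single weak cross edge
similarities = sc.parallelize([
    (0, 1, 0.9),
    (2, 3, 0.9),
    (1, 2, 0.1),
])

model = PowerIterationClustering.train(similarities, k=2, maxIterations=10)
for a in model.assignments().collect():
    print(a.id, "-&gt;", a.cluster)</code></pre></div>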
<p><strong>Examples</strong></p>
<p>In the following, we show code snippets to demonstrate how to use PIC in <code class="language-plaintext highlighter-rouge">spark.mllib</code>.</p>
<div class="codetabs">
<div data-lang="python">
<p><a href="api/python/reference/api/pyspark.mllib.clustering.PowerIterationClustering.html"><code class="language-plaintext highlighter-rouge">PowerIterationClustering</code></a>
implements the PIC algorithm.
It takes an <code class="language-plaintext highlighter-rouge">RDD</code> of <code class="language-plaintext highlighter-rouge">(srcId: Long, dstId: Long, similarity: Double)</code> tuples representing the
affinity matrix.
Calling <code class="language-plaintext highlighter-rouge">PowerIterationClustering.run</code> returns a
<a href="api/python/reference/api/pyspark.mllib.clustering.PowerIterationClustering.html"><code class="language-plaintext highlighter-rouge">PowerIterationClusteringModel</code></a>,
which contains the computed clustering assignments.</p>
<p>Refer to the <a href="api/python/reference/api/pyspark.mllib.clustering.PowerIterationClustering.html"><code class="language-plaintext highlighter-rouge">PowerIterationClustering</code> Python docs</a> and <a href="api/python/reference/api/pyspark.mllib.clustering.PowerIterationClusteringModel.html"><code class="language-plaintext highlighter-rouge">PowerIterationClusteringModel</code> Python docs</a> for more details on the API.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">from</span> <span class="nn">pyspark.mllib.clustering</span> <span class="kn">import</span> <span class="n">PowerIterationClustering</span><span class="p">,</span> <span class="n">PowerIterationClusteringModel</span>
<span class="c1"># Load and parse the data
</span><span class="n">data</span> <span class="o">=</span> <span class="n">sc</span><span class="p">.</span><span class="n">textFile</span><span class="p">(</span><span class="s">"data/mllib/pic_data.txt"</span><span class="p">)</span>
<span class="n">similarities</span> <span class="o">=</span> <span class="n">data</span><span class="p">.</span><span class="nb">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">line</span><span class="p">:</span> <span class="nb">tuple</span><span class="p">([</span><span class="nb">float</span><span class="p">(</span><span class="n">x</span><span class="p">)</span> <span class="k">for</span> <span class="n">x</span> <span class="ow">in</span> <span class="n">line</span><span class="p">.</span><span class="n">split</span><span class="p">(</span><span class="s">' '</span><span class="p">)]))</span>
<span class="c1"># Cluster the data into two classes using PowerIterationClustering
</span><span class="n">model</span> <span class="o">=</span> <span class="n">PowerIterationClustering</span><span class="p">.</span><span class="n">train</span><span class="p">(</span><span class="n">similarities</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">10</span><span class="p">)</span>
<span class="n">model</span><span class="p">.</span><span class="n">assignments</span><span class="p">().</span><span class="n">foreach</span><span class="p">(</span><span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="k">print</span><span class="p">(</span><span class="nb">str</span><span class="p">(</span><span class="n">x</span><span class="p">.</span><span class="nb">id</span><span class="p">)</span> <span class="o">+</span> <span class="s">" -&gt; "</span> <span class="o">+</span> <span class="nb">str</span><span class="p">(</span><span class="n">x</span><span class="p">.</span><span class="n">cluster</span><span class="p">)))</span>
<span class="c1"># Save and load model
</span><span class="n">model</span><span class="p">.</span><span class="n">save</span><span class="p">(</span><span class="n">sc</span><span class="p">,</span> <span class="s">"target/org/apache/spark/PythonPowerIterationClusteringExample/PICModel"</span><span class="p">)</span>
<span class="n">sameModel</span> <span class="o">=</span> <span class="n">PowerIterationClusteringModel</span>\
<span class="p">.</span><span class="n">load</span><span class="p">(</span><span class="n">sc</span><span class="p">,</span> <span class="s">"target/org/apache/spark/PythonPowerIterationClusteringExample/PICModel"</span><span class="p">)</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/python/mllib/power_iteration_clustering_example.py" in the Spark repo.</small></div>
</div>
<div data-lang="scala">
<p><a href="api/scala/org/apache/spark/mllib/clustering/PowerIterationClustering.html"><code class="language-plaintext highlighter-rouge">PowerIterationClustering</code></a>
implements the PIC algorithm.
It takes an <code class="language-plaintext highlighter-rouge">RDD</code> of <code class="language-plaintext highlighter-rouge">(srcId: Long, dstId: Long, similarity: Double)</code> tuples representing the
affinity matrix.
Calling <code class="language-plaintext highlighter-rouge">PowerIterationClustering.run</code> returns a
<a href="api/scala/org/apache/spark/mllib/clustering/PowerIterationClusteringModel.html"><code class="language-plaintext highlighter-rouge">PowerIterationClusteringModel</code></a>,
which contains the computed clustering assignments.</p>
<p>Refer to the <a href="api/scala/org/apache/spark/mllib/clustering/PowerIterationClustering.html"><code class="language-plaintext highlighter-rouge">PowerIterationClustering</code> Scala docs</a> and <a href="api/scala/org/apache/spark/mllib/clustering/PowerIterationClusteringModel.html"><code class="language-plaintext highlighter-rouge">PowerIterationClusteringModel</code> Scala docs</a> for details on the API.</p>
<div class="highlight"><pre class="codehilite"><code><span class="k">import</span> <span class="nn">org.apache.spark.mllib.clustering.PowerIterationClustering</span>
<span class="k">val</span> <span class="nv">circlesRdd</span> <span class="k">=</span> <span class="nf">generateCirclesRdd</span><span class="o">(</span><span class="n">sc</span><span class="o">,</span> <span class="nv">params</span><span class="o">.</span><span class="py">k</span><span class="o">,</span> <span class="nv">params</span><span class="o">.</span><span class="py">numPoints</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">model</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">PowerIterationClustering</span><span class="o">()</span>
<span class="o">.</span><span class="py">setK</span><span class="o">(</span><span class="nv">params</span><span class="o">.</span><span class="py">k</span><span class="o">)</span>
<span class="o">.</span><span class="py">setMaxIterations</span><span class="o">(</span><span class="nv">params</span><span class="o">.</span><span class="py">maxIterations</span><span class="o">)</span>
<span class="o">.</span><span class="py">setInitializationMode</span><span class="o">(</span><span class="s">"degree"</span><span class="o">)</span>
<span class="o">.</span><span class="py">run</span><span class="o">(</span><span class="n">circlesRdd</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">clusters</span> <span class="k">=</span> <span class="nv">model</span><span class="o">.</span><span class="py">assignments</span><span class="o">.</span><span class="py">collect</span><span class="o">().</span><span class="py">groupBy</span><span class="o">(</span><span class="nv">_</span><span class="o">.</span><span class="py">cluster</span><span class="o">).</span><span class="py">mapValues</span><span class="o">(</span><span class="nv">_</span><span class="o">.</span><span class="py">map</span><span class="o">(</span><span class="nv">_</span><span class="o">.</span><span class="py">id</span><span class="o">))</span>
<span class="k">val</span> <span class="nv">assignments</span> <span class="k">=</span> <span class="nv">clusters</span><span class="o">.</span><span class="py">toList</span><span class="o">.</span><span class="py">sortBy</span> <span class="o">{</span> <span class="nf">case</span> <span class="o">(</span><span class="n">k</span><span class="o">,</span> <span class="n">v</span><span class="o">)</span> <span class="k">=&gt;</span> <span class="nv">v</span><span class="o">.</span><span class="py">length</span> <span class="o">}</span>
<span class="k">val</span> <span class="nv">assignmentsStr</span> <span class="k">=</span> <span class="n">assignments</span>
<span class="o">.</span><span class="py">map</span> <span class="o">{</span> <span class="nf">case</span> <span class="o">(</span><span class="n">k</span><span class="o">,</span> <span class="n">v</span><span class="o">)</span> <span class="k">=&gt;</span>
<span class="n">s</span><span class="s">"$k -&gt; ${v.sorted.mkString("</span><span class="o">[</span><span class="err">"</span>, <span class="err">"</span>,<span class="err">"</span>, <span class="err">"</span><span class="o">]</span><span class="s">")}"</span>
<span class="o">}.</span><span class="py">mkString</span><span class="o">(</span><span class="s">", "</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">sizesStr</span> <span class="k">=</span> <span class="nv">assignments</span><span class="o">.</span><span class="py">map</span> <span class="o">{</span>
<span class="nv">_</span><span class="o">.</span><span class="py">_2</span><span class="o">.</span><span class="py">length</span>
<span class="o">}.</span><span class="py">sorted</span><span class="o">.</span><span class="py">mkString</span><span class="o">(</span><span class="s">"("</span><span class="o">,</span> <span class="s">","</span><span class="o">,</span> <span class="s">")"</span><span class="o">)</span>
<span class="nf">println</span><span class="o">(</span><span class="n">s</span><span class="s">"Cluster assignments: $assignmentsStr\ncluster sizes: $sizesStr"</span><span class="o">)</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/scala/org/apache/spark/examples/mllib/PowerIterationClusteringExample.scala" in the Spark repo.</small></div>
</div>
<div data-lang="java">
<p><a href="api/java/org/apache/spark/mllib/clustering/PowerIterationClustering.html"><code class="language-plaintext highlighter-rouge">PowerIterationClustering</code></a>
implements the PIC algorithm.
It takes a <code class="language-plaintext highlighter-rouge">JavaRDD</code> of <code class="language-plaintext highlighter-rouge">(srcId: Long, dstId: Long, similarity: Double)</code> tuples representing the
affinity matrix.
Calling <code class="language-plaintext highlighter-rouge">PowerIterationClustering.run</code> returns a
<a href="api/java/org/apache/spark/mllib/clustering/PowerIterationClusteringModel.html"><code class="language-plaintext highlighter-rouge">PowerIterationClusteringModel</code></a>
which contains the computed clustering assignments.</p>
<p>Refer to the <a href="api/java/org/apache/spark/mllib/clustering/PowerIterationClustering.html"><code class="language-plaintext highlighter-rouge">PowerIterationClustering</code> Java docs</a> and <a href="api/java/org/apache/spark/mllib/clustering/PowerIterationClusteringModel.html"><code class="language-plaintext highlighter-rouge">PowerIterationClusteringModel</code> Java docs</a> for details on the API.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">import</span> <span class="nn">org.apache.spark.mllib.clustering.PowerIterationClustering</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.mllib.clustering.PowerIterationClusteringModel</span><span class="o">;</span>
<span class="nc">JavaRDD</span><span class="o">&lt;</span><span class="nc">Tuple3</span><span class="o">&lt;</span><span class="nc">Long</span><span class="o">,</span> <span class="nc">Long</span><span class="o">,</span> <span class="nc">Double</span><span class="o">&gt;&gt;</span> <span class="n">similarities</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="na">parallelize</span><span class="o">(</span><span class="nc">Arrays</span><span class="o">.</span><span class="na">asList</span><span class="o">(</span>
<span class="k">new</span> <span class="nc">Tuple3</span><span class="o">&lt;&gt;(</span><span class="mi">0L</span><span class="o">,</span> <span class="mi">1L</span><span class="o">,</span> <span class="mf">0.9</span><span class="o">),</span>
<span class="k">new</span> <span class="nc">Tuple3</span><span class="o">&lt;&gt;(</span><span class="mi">1L</span><span class="o">,</span> <span class="mi">2L</span><span class="o">,</span> <span class="mf">0.9</span><span class="o">),</span>
<span class="k">new</span> <span class="nc">Tuple3</span><span class="o">&lt;&gt;(</span><span class="mi">2L</span><span class="o">,</span> <span class="mi">3L</span><span class="o">,</span> <span class="mf">0.9</span><span class="o">),</span>
<span class="k">new</span> <span class="nc">Tuple3</span><span class="o">&lt;&gt;(</span><span class="mi">3L</span><span class="o">,</span> <span class="mi">4L</span><span class="o">,</span> <span class="mf">0.1</span><span class="o">),</span>
<span class="k">new</span> <span class="nc">Tuple3</span><span class="o">&lt;&gt;(</span><span class="mi">4L</span><span class="o">,</span> <span class="mi">5L</span><span class="o">,</span> <span class="mf">0.9</span><span class="o">)));</span>
<span class="nc">PowerIterationClustering</span> <span class="n">pic</span> <span class="o">=</span> <span class="k">new</span> <span class="nc">PowerIterationClustering</span><span class="o">()</span>
<span class="o">.</span><span class="na">setK</span><span class="o">(</span><span class="mi">2</span><span class="o">)</span>
<span class="o">.</span><span class="na">setMaxIterations</span><span class="o">(</span><span class="mi">10</span><span class="o">);</span>
<span class="nc">PowerIterationClusteringModel</span> <span class="n">model</span> <span class="o">=</span> <span class="n">pic</span><span class="o">.</span><span class="na">run</span><span class="o">(</span><span class="n">similarities</span><span class="o">);</span>
<span class="k">for</span> <span class="o">(</span><span class="nc">PowerIterationClustering</span><span class="o">.</span><span class="na">Assignment</span> <span class="nl">a:</span> <span class="n">model</span><span class="o">.</span><span class="na">assignments</span><span class="o">().</span><span class="na">toJavaRDD</span><span class="o">().</span><span class="na">collect</span><span class="o">())</span> <span class="o">{</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="n">a</span><span class="o">.</span><span class="na">id</span><span class="o">()</span> <span class="o">+</span> <span class="s">" -&gt; "</span> <span class="o">+</span> <span class="n">a</span><span class="o">.</span><span class="na">cluster</span><span class="o">());</span>
<span class="o">}</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/java/org/apache/spark/examples/mllib/JavaPowerIterationClusteringExample.java" in the Spark repo.</small></div>
</div>
</div>
<h2 id="latent-dirichlet-allocation-lda">Latent Dirichlet allocation (LDA)</h2>
<p><a href="http://en.wikipedia.org/wiki/Latent_Dirichlet_allocation">Latent Dirichlet allocation (LDA)</a>
is a topic model which infers topics from a collection of text documents.
LDA can be thought of as a clustering algorithm as follows:</p>
<ul>
<li>Topics correspond to cluster centers, and documents correspond to
examples (rows) in a dataset.</li>
<li>Topics and documents both exist in a feature space, where feature
vectors are vectors of word counts (bag of words).</li>
<li>Rather than estimating a clustering using a traditional distance, LDA
uses a function based on a statistical model of how text documents are
generated.</li>
</ul>
<p>LDA supports different inference algorithms via the <code class="language-plaintext highlighter-rouge">setOptimizer</code> function.
<code class="language-plaintext highlighter-rouge">EMLDAOptimizer</code> learns a clustering using
<a href="http://en.wikipedia.org/wiki/Expectation%E2%80%93maximization_algorithm">expectation-maximization</a>
on the likelihood function and yields comprehensive results, while
<code class="language-plaintext highlighter-rouge">OnlineLDAOptimizer</code> uses iterative mini-batch sampling for <a href="https://mimno.infosci.cornell.edu/info6150/readings/HoffmanBleiBach2010b.pdf">online
variational
inference</a>
and is generally memory friendly.</p>
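<p>For instance, a minimal Scala sketch of switching optimizers; the string form of <code class="language-plaintext highlighter-rouge">setOptimizer</code> accepts <code class="language-plaintext highlighter-rouge">"em"</code> or <code class="language-plaintext highlighter-rouge">"online"</code>, and an optimizer instance can also be passed directly:</p>
<div class="highlight"><pre class="codehilite"><code>import org.apache.spark.mllib.clustering.{LDA, OnlineLDAOptimizer}

// "em" (the default) selects EMLDAOptimizer.
val emLDA = new LDA().setK(3).setOptimizer("em")

// Passing an optimizer instance allows optimizer-specific settings.
val onlineLDA = new LDA()
  .setK(3)
  .setOptimizer(new OnlineLDAOptimizer().setMiniBatchFraction(0.05))</code></pre></div>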
<p>LDA takes in a collection of documents as vectors of word counts and the
following parameters (set using the builder pattern, as sketched after this list):</p>
<ul>
<li><code class="language-plaintext highlighter-rouge">k</code>: Number of topics (i.e., cluster centers)</li>
<li><code class="language-plaintext highlighter-rouge">optimizer</code>: Optimizer to use for learning the LDA model, either
<code class="language-plaintext highlighter-rouge">EMLDAOptimizer</code> or <code class="language-plaintext highlighter-rouge">OnlineLDAOptimizer</code></li>
<li><code class="language-plaintext highlighter-rouge">docConcentration</code>: Dirichlet parameter for prior over documents&#8217;
distributions over topics. Larger values encourage smoother inferred
distributions.</li>
<li><code class="language-plaintext highlighter-rouge">topicConcentration</code>: Dirichlet parameter for prior over topics&#8217;
distributions over terms (words). Larger values encourage smoother
inferred distributions.</li>
<li><code class="language-plaintext highlighter-rouge">maxIterations</code>: Limit on the number of iterations.</li>
<li><code class="language-plaintext highlighter-rouge">checkpointInterval</code>: If using checkpointing (set in the Spark
configuration), this parameter specifies the frequency with which
checkpoints will be created. If <code class="language-plaintext highlighter-rouge">maxIterations</code> is large, using
checkpointing can help reduce shuffle file sizes on disk and help with
failure recovery.</li>
</ul>
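<p>Put together, a hedged Scala sketch of this builder pattern; the parameter values here are illustrative, not recommendations:</p>
<div class="highlight"><pre class="codehilite"><code>import org.apache.spark.mllib.clustering.LDA

val lda = new LDA()
  .setK(10)                  // number of topics
  .setDocConcentration(-1)   // -1 picks the optimizer-specific default
  .setTopicConcentration(-1) // -1 picks the optimizer-specific default
  .setMaxIterations(50)
  .setCheckpointInterval(10) // only relevant if checkpointing is enabled</code></pre></div>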
<p>All of <code class="language-plaintext highlighter-rouge">spark.mllib</code>&#8217;s LDA models support:</p>
<ul>
<li><code class="language-plaintext highlighter-rouge">describeTopics</code>: Returns topics as arrays of most important terms and
term weights</li>
<li><code class="language-plaintext highlighter-rouge">topicsMatrix</code>: Returns a <code class="language-plaintext highlighter-rouge">vocabSize</code> by <code class="language-plaintext highlighter-rouge">k</code> matrix where each column
is a topic</li>
</ul>
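<p>A short sketch of these two methods, assuming a trained model named <code class="language-plaintext highlighter-rouge">ldaModel</code>:</p>
<div class="highlight"><pre class="codehilite"><code>// topicsMatrix: vocabSize x k, one topic per column.
val topics = ldaModel.topicsMatrix

// describeTopics: for each topic, the top terms and their weights.
ldaModel.describeTopics(maxTermsPerTopic = 5).zipWithIndex.foreach {
  case ((termIndices, termWeights), topic) =&gt;
    println(s"Topic $topic: " + termIndices.zip(termWeights).mkString(", "))
}</code></pre></div>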
<p><em>Note</em>: LDA is still an experimental feature under active development.
As a result, certain features are available in only one of the two
optimizers and the model it generates. Currently, a distributed
model can be converted into a local model, but not vice versa.</p>
<p>The following discussion will describe each optimizer/model pair
separately.</p>
<p><strong>Expectation Maximization</strong></p>
<p>Implemented in
<a href="api/scala/org/apache/spark/mllib/clustering/EMLDAOptimizer.html"><code class="language-plaintext highlighter-rouge">EMLDAOptimizer</code></a>
and
<a href="api/scala/org/apache/spark/mllib/clustering/DistributedLDAModel.html"><code class="language-plaintext highlighter-rouge">DistributedLDAModel</code></a>.</p>
<p>For the parameters provided to <code class="language-plaintext highlighter-rouge">LDA</code>:</p>
<ul>
<li><code class="language-plaintext highlighter-rouge">docConcentration</code>: Only symmetric priors are supported, so all values
in the provided <code class="language-plaintext highlighter-rouge">k</code>-dimensional vector must be identical. All values
must also be $&gt; 1.0$. Providing <code class="language-plaintext highlighter-rouge">Vector(-1)</code> results in default behavior
(uniform <code class="language-plaintext highlighter-rouge">k</code>-dimensional vector with value $(50 / k) + 1$).</li>
<li><code class="language-plaintext highlighter-rouge">topicConcentration</code>: Only symmetric priors are supported. Values must be
$&gt; 1.0$. Providing <code class="language-plaintext highlighter-rouge">-1</code> results in defaulting to a value of $0.1 + 1$.</li>
<li><code class="language-plaintext highlighter-rouge">maxIterations</code>: The maximum number of EM iterations.</li>
</ul>
<p><em>Note</em>: It is important to do enough iterations. In early iterations, EM often has useless topics,
but those topics improve dramatically after more iterations. Using at least 20 and possibly
50-100 iterations is often reasonable, depending on your dataset.</p>
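<p>A sketch under the constraints above (symmetric priors $&gt; 1.0$, enough EM iterations), assuming <code class="language-plaintext highlighter-rouge">corpus</code> is an <code class="language-plaintext highlighter-rouge">RDD[(Long, Vector)]</code> as in the examples below:</p>
<div class="highlight"><pre class="codehilite"><code>import org.apache.spark.mllib.clustering.{DistributedLDAModel, EMLDAOptimizer, LDA}

val emModel = new LDA()
  .setK(5)
  .setDocConcentration(11.0)  // symmetric and &gt; 1.0 (default: 50 / k + 1)
  .setTopicConcentration(1.1) // &gt; 1.0 (default: 0.1 + 1)
  .setMaxIterations(50)       // EM usually needs 20-100 iterations
  .setOptimizer(new EMLDAOptimizer)
  .run(corpus)
  .asInstanceOf[DistributedLDAModel] // EMLDAOptimizer yields this model type</code></pre></div>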
<p><code class="language-plaintext highlighter-rouge">EMLDAOptimizer</code> produces a <code class="language-plaintext highlighter-rouge">DistributedLDAModel</code>, which stores not only
the inferred topics but also the full training corpus and topic
distributions for each document in the training corpus. A
<code class="language-plaintext highlighter-rouge">DistributedLDAModel</code> supports:</p>
<ul>
<li><code class="language-plaintext highlighter-rouge">topTopicsPerDocument</code>: The top topics and their weights for
each document in the training corpus</li>
<li><code class="language-plaintext highlighter-rouge">topDocumentsPerTopic</code>: The top documents for each topic and
the corresponding weight of the topic in the documents.</li>
<li><code class="language-plaintext highlighter-rouge">logPrior</code>: log probability of the estimated topics and
document-topic distributions given the hyperparameters
<code class="language-plaintext highlighter-rouge">docConcentration</code> and <code class="language-plaintext highlighter-rouge">topicConcentration</code></li>
<li><code class="language-plaintext highlighter-rouge">logLikelihood</code>: log likelihood of the training corpus, given the
inferred topics and document-topic distributions</li>
</ul>
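<p>A brief sketch of these methods on a <code class="language-plaintext highlighter-rouge">DistributedLDAModel</code> named <code class="language-plaintext highlighter-rouge">distModel</code> (assumed already trained via EM):</p>
<div class="highlight"><pre class="codehilite"><code>// Top 3 topics per training document: RDD[(docId, topicIndices, topicWeights)].
val topTopics = distModel.topTopicsPerDocument(3)

// Top 3 documents per topic: Array[(docIds, docTopicWeights)], one entry per topic.
val topDocs = distModel.topDocumentsPerTopic(3)

println(s"logPrior = ${distModel.logPrior}")
println(s"logLikelihood = ${distModel.logLikelihood}")</code></pre></div>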
<p><strong>Online Variational Bayes</strong></p>
<p>Implemented in
<a href="api/scala/org/apache/spark/mllib/clustering/OnlineLDAOptimizer.html"><code class="language-plaintext highlighter-rouge">OnlineLDAOptimizer</code></a>
and
<a href="api/scala/org/apache/spark/mllib/clustering/LocalLDAModel.html"><code class="language-plaintext highlighter-rouge">LocalLDAModel</code></a>.</p>
<p>For the parameters provided to <code class="language-plaintext highlighter-rouge">LDA</code>:</p>
<ul>
<li><code class="language-plaintext highlighter-rouge">docConcentration</code>: Asymmetric priors can be used by passing in a
vector with values equal to the Dirichlet parameter in each of the <code class="language-plaintext highlighter-rouge">k</code>
dimensions. Values should be $&gt;= 0$. Providing <code class="language-plaintext highlighter-rouge">Vector(-1)</code> results in
default behavior (uniform <code class="language-plaintext highlighter-rouge">k</code>-dimensional vector with value $(1.0 / k)$).</li>
<li><code class="language-plaintext highlighter-rouge">topicConcentration</code>: Only symmetric priors are supported. Values must be
$&gt;= 0$. Providing <code class="language-plaintext highlighter-rouge">-1</code> results in defaulting to a value of $(1.0 / k)$.</li>
<li><code class="language-plaintext highlighter-rouge">maxIterations</code>: Maximum number of minibatches to submit.</li>
</ul>
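<p>Unlike EM, online LDA accepts an asymmetric document prior; a sketch for <code class="language-plaintext highlighter-rouge">k = 3</code>, with illustrative values:</p>
<div class="highlight"><pre class="codehilite"><code>import org.apache.spark.mllib.clustering.LDA
import org.apache.spark.mllib.linalg.Vectors

val onlineLda = new LDA()
  .setK(3)
  .setOptimizer("online")    // the default is EM, which rejects asymmetric priors
  .setDocConcentration(Vectors.dense(0.2, 0.5, 1.5)) // per-topic alpha, values &gt;= 0
  .setTopicConcentration(-1) // -1 defaults to 1.0 / k
  .setMaxIterations(100)     // maximum number of minibatches to submit</code></pre></div>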
<p>In addition, <code class="language-plaintext highlighter-rouge">OnlineLDAOptimizer</code> accepts the following parameters:</p>
<ul>
<li><code class="language-plaintext highlighter-rouge">miniBatchFraction</code>: Fraction of corpus sampled and used at each
iteration</li>
<li><code class="language-plaintext highlighter-rouge">optimizeDocConcentration</code>: If set to true, performs maximum-likelihood
estimation of the hyperparameter <code class="language-plaintext highlighter-rouge">docConcentration</code> (aka <code class="language-plaintext highlighter-rouge">alpha</code>)
after each minibatch and sets the optimized <code class="language-plaintext highlighter-rouge">docConcentration</code> in the
returned <code class="language-plaintext highlighter-rouge">LocalLDAModel</code></li>
<li><code class="language-plaintext highlighter-rouge">tau0</code> and <code class="language-plaintext highlighter-rouge">kappa</code>: Used for learning-rate decay, which is computed by
$(\tau_0 + iter)^{-\kappa}$ where $iter$ is the current number of iterations.</li>
</ul>
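<p>These optimizer-specific knobs are set on the optimizer instance itself; a hedged sketch:</p>
<div class="highlight"><pre class="codehilite"><code>import org.apache.spark.mllib.clustering.OnlineLDAOptimizer

val onlineOptimizer = new OnlineLDAOptimizer()
  .setMiniBatchFraction(0.05)        // sample 5% of the corpus per iteration
  .setOptimizeDocConcentration(true) // also fit alpha after each minibatch
  .setTau0(1024)                     // learning rate decays as (tau0 + iter)^(-kappa)
  .setKappa(0.51)</code></pre></div>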
<p><code class="language-plaintext highlighter-rouge">OnlineLDAOptimizer</code> produces a <code class="language-plaintext highlighter-rouge">LocalLDAModel</code>, which only stores the
inferred topics. A <code class="language-plaintext highlighter-rouge">LocalLDAModel</code> supports:</p>
<ul>
<li><code class="language-plaintext highlighter-rouge">logLikelihood(documents)</code>: Calculates a lower bound on the provided
<code class="language-plaintext highlighter-rouge">documents</code> given the inferred topics.</li>
<li><code class="language-plaintext highlighter-rouge">logPerplexity(documents)</code>: Calculates an upper bound on the
perplexity of the provided <code class="language-plaintext highlighter-rouge">documents</code> given the inferred topics.</li>
</ul>
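<p>A sketch of both bounds, assuming <code class="language-plaintext highlighter-rouge">localModel</code> is a <code class="language-plaintext highlighter-rouge">LocalLDAModel</code> and <code class="language-plaintext highlighter-rouge">documents</code> is an <code class="language-plaintext highlighter-rouge">RDD[(Long, Vector)]</code> of word-count vectors:</p>
<div class="highlight"><pre class="codehilite"><code>val likelihoodBound = localModel.logLikelihood(documents) // lower bound
val perplexityBound = localModel.logPerplexity(documents) // upper bound
println(s"log likelihood &gt;= $likelihoodBound, perplexity &lt;= $perplexityBound")</code></pre></div>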
<p><strong>Examples</strong></p>
<p>In the following example, we load word count vectors representing a corpus of documents.
We then use <a href="api/scala/org/apache/spark/mllib/clustering/LDA.html">LDA</a>
to infer three topics from the documents. The number of desired clusters is passed
to the algorithm. We then output the topics, represented as probability distributions over words.</p>
<div class="codetabs">
<div data-lang="python">
<p>Refer to the <a href="api/python/reference/api/pyspark.mllib.clustering.LDA.html"><code class="language-plaintext highlighter-rouge">LDA</code> Python docs</a> and <a href="api/python/reference/api/pyspark.mllib.clustering.LDAModel.html"><code class="language-plaintext highlighter-rouge">LDAModel</code> Python docs</a> for more details on the API.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">from</span> <span class="nn">pyspark.mllib.clustering</span> <span class="kn">import</span> <span class="n">LDA</span><span class="p">,</span> <span class="n">LDAModel</span>
<span class="kn">from</span> <span class="nn">pyspark.mllib.linalg</span> <span class="kn">import</span> <span class="n">Vectors</span>
<span class="c1"># Load and parse the data
</span><span class="n">data</span> <span class="o">=</span> <span class="n">sc</span><span class="p">.</span><span class="n">textFile</span><span class="p">(</span><span class="s">"data/mllib/sample_lda_data.txt"</span><span class="p">)</span>
<span class="n">parsedData</span> <span class="o">=</span> <span class="n">data</span><span class="p">.</span><span class="nb">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">line</span><span class="p">:</span> <span class="n">Vectors</span><span class="p">.</span><span class="n">dense</span><span class="p">([</span><span class="nb">float</span><span class="p">(</span><span class="n">x</span><span class="p">)</span> <span class="k">for</span> <span class="n">x</span> <span class="ow">in</span> <span class="n">line</span><span class="p">.</span><span class="n">strip</span><span class="p">().</span><span class="n">split</span><span class="p">(</span><span class="s">' '</span><span class="p">)]))</span>
<span class="c1"># Index documents with unique IDs
</span><span class="n">corpus</span> <span class="o">=</span> <span class="n">parsedData</span><span class="p">.</span><span class="n">zipWithIndex</span><span class="p">().</span><span class="nb">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="p">[</span><span class="n">x</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">x</span><span class="p">[</span><span class="mi">0</span><span class="p">]]).</span><span class="n">cache</span><span class="p">()</span>
<span class="c1"># Cluster the documents into three topics using LDA
</span><span class="n">ldaModel</span> <span class="o">=</span> <span class="n">LDA</span><span class="p">.</span><span class="n">train</span><span class="p">(</span><span class="n">corpus</span><span class="p">,</span> <span class="n">k</span><span class="o">=</span><span class="mi">3</span><span class="p">)</span>
<span class="c1"># Output topics. Each is a distribution over words (matching word count vectors)
</span><span class="k">print</span><span class="p">(</span><span class="s">"Learned topics (as distributions over vocab of "</span> <span class="o">+</span> <span class="nb">str</span><span class="p">(</span><span class="n">ldaModel</span><span class="p">.</span><span class="n">vocabSize</span><span class="p">())</span>
<span class="o">+</span> <span class="s">" words):"</span><span class="p">)</span>
<span class="n">topics</span> <span class="o">=</span> <span class="n">ldaModel</span><span class="p">.</span><span class="n">topicsMatrix</span><span class="p">()</span>
<span class="k">for</span> <span class="n">topic</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">3</span><span class="p">):</span>
<span class="k">print</span><span class="p">(</span><span class="s">"Topic "</span> <span class="o">+</span> <span class="nb">str</span><span class="p">(</span><span class="n">topic</span><span class="p">)</span> <span class="o">+</span> <span class="s">":"</span><span class="p">)</span>
<span class="k">for</span> <span class="n">word</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="n">ldaModel</span><span class="p">.</span><span class="n">vocabSize</span><span class="p">()):</span>
<span class="k">print</span><span class="p">(</span><span class="s">" "</span> <span class="o">+</span> <span class="nb">str</span><span class="p">(</span><span class="n">topics</span><span class="p">[</span><span class="n">word</span><span class="p">][</span><span class="n">topic</span><span class="p">]))</span>
<span class="c1"># Save and load model
</span><span class="n">ldaModel</span><span class="p">.</span><span class="n">save</span><span class="p">(</span><span class="n">sc</span><span class="p">,</span> <span class="s">"target/org/apache/spark/PythonLatentDirichletAllocationExample/LDAModel"</span><span class="p">)</span>
<span class="n">sameModel</span> <span class="o">=</span> <span class="n">LDAModel</span>\
<span class="p">.</span><span class="n">load</span><span class="p">(</span><span class="n">sc</span><span class="p">,</span> <span class="s">"target/org/apache/spark/PythonLatentDirichletAllocationExample/LDAModel"</span><span class="p">)</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/python/mllib/latent_dirichlet_allocation_example.py" in the Spark repo.</small></div>
</div>
<div data-lang="scala">
<p>Refer to the <a href="api/scala/org/apache/spark/mllib/clustering/LDA.html"><code class="language-plaintext highlighter-rouge">LDA</code> Scala docs</a> and <a href="api/scala/org/apache/spark/mllib/clustering/DistributedLDAModel.html"><code class="language-plaintext highlighter-rouge">DistributedLDAModel</code> Scala docs</a> for details on the API.</p>
<div class="highlight"><pre class="codehilite"><code><span class="k">import</span> <span class="nn">org.apache.spark.mllib.clustering.</span><span class="o">{</span><span class="nc">DistributedLDAModel</span><span class="o">,</span> <span class="nc">LDA</span><span class="o">}</span>
<span class="k">import</span> <span class="nn">org.apache.spark.mllib.linalg.Vectors</span>
<span class="c1">// Load and parse the data</span>
<span class="k">val</span> <span class="nv">data</span> <span class="k">=</span> <span class="nv">sc</span><span class="o">.</span><span class="py">textFile</span><span class="o">(</span><span class="s">"data/mllib/sample_lda_data.txt"</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">parsedData</span> <span class="k">=</span> <span class="nv">data</span><span class="o">.</span><span class="py">map</span><span class="o">(</span><span class="n">s</span> <span class="k">=&gt;</span> <span class="nv">Vectors</span><span class="o">.</span><span class="py">dense</span><span class="o">(</span><span class="nv">s</span><span class="o">.</span><span class="py">trim</span><span class="o">.</span><span class="py">split</span><span class="o">(</span><span class="sc">' '</span><span class="o">).</span><span class="py">map</span><span class="o">(</span><span class="nv">_</span><span class="o">.</span><span class="py">toDouble</span><span class="o">)))</span>
<span class="c1">// Index documents with unique IDs</span>
<span class="k">val</span> <span class="nv">corpus</span> <span class="k">=</span> <span class="nv">parsedData</span><span class="o">.</span><span class="py">zipWithIndex</span><span class="o">.</span><span class="py">map</span><span class="o">(</span><span class="nv">_</span><span class="o">.</span><span class="py">swap</span><span class="o">).</span><span class="py">cache</span><span class="o">()</span>
<span class="c1">// Cluster the documents into three topics using LDA</span>
<span class="k">val</span> <span class="nv">ldaModel</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">LDA</span><span class="o">().</span><span class="py">setK</span><span class="o">(</span><span class="mi">3</span><span class="o">).</span><span class="py">run</span><span class="o">(</span><span class="n">corpus</span><span class="o">)</span>
<span class="c1">// Output topics. Each is a distribution over words (matching word count vectors)</span>
<span class="nf">println</span><span class="o">(</span><span class="n">s</span><span class="s">"Learned topics (as distributions over vocab of ${ldaModel.vocabSize} words):"</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">topics</span> <span class="k">=</span> <span class="nv">ldaModel</span><span class="o">.</span><span class="py">topicsMatrix</span>
<span class="nf">for</span> <span class="o">(</span><span class="n">topic</span> <span class="k">&lt;-</span> <span class="nc">Range</span><span class="o">(</span><span class="mi">0</span><span class="o">,</span> <span class="mi">3</span><span class="o">))</span> <span class="o">{</span>
<span class="nf">print</span><span class="o">(</span><span class="n">s</span><span class="s">"Topic $topic :"</span><span class="o">)</span>
<span class="nf">for</span> <span class="o">(</span><span class="n">word</span> <span class="k">&lt;-</span> <span class="nc">Range</span><span class="o">(</span><span class="mi">0</span><span class="o">,</span> <span class="nv">ldaModel</span><span class="o">.</span><span class="py">vocabSize</span><span class="o">))</span> <span class="o">{</span>
<span class="nf">print</span><span class="o">(</span><span class="n">s</span><span class="s">"${topics(word, topic)}"</span><span class="o">)</span>
<span class="o">}</span>
<span class="nf">println</span><span class="o">()</span>
<span class="o">}</span>
<span class="c1">// Save and load model.</span>
<span class="nv">ldaModel</span><span class="o">.</span><span class="py">save</span><span class="o">(</span><span class="n">sc</span><span class="o">,</span> <span class="s">"target/org/apache/spark/LatentDirichletAllocationExample/LDAModel"</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">sameModel</span> <span class="k">=</span> <span class="nv">DistributedLDAModel</span><span class="o">.</span><span class="py">load</span><span class="o">(</span><span class="n">sc</span><span class="o">,</span>
<span class="s">"target/org/apache/spark/LatentDirichletAllocationExample/LDAModel"</span><span class="o">)</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/scala/org/apache/spark/examples/mllib/LatentDirichletAllocationExample.scala" in the Spark repo.</small></div>
</div>
<div data-lang="java">
<p>Refer to the <a href="api/java/org/apache/spark/mllib/clustering/LDA.html"><code class="language-plaintext highlighter-rouge">LDA</code> Java docs</a> and <a href="api/java/org/apache/spark/mllib/clustering/DistributedLDAModel.html"><code class="language-plaintext highlighter-rouge">DistributedLDAModel</code> Java docs</a> for details on the API.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">import</span> <span class="nn">scala.Tuple2</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.api.java.JavaPairRDD</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.api.java.JavaRDD</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.mllib.clustering.DistributedLDAModel</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.mllib.clustering.LDA</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.mllib.clustering.LDAModel</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.mllib.linalg.Matrix</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.mllib.linalg.Vector</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.mllib.linalg.Vectors</span><span class="o">;</span>
<span class="c1">// Load and parse the data</span>
<span class="nc">String</span> <span class="n">path</span> <span class="o">=</span> <span class="s">"data/mllib/sample_lda_data.txt"</span><span class="o">;</span>
<span class="nc">JavaRDD</span><span class="o">&lt;</span><span class="nc">String</span><span class="o">&gt;</span> <span class="n">data</span> <span class="o">=</span> <span class="n">jsc</span><span class="o">.</span><span class="na">textFile</span><span class="o">(</span><span class="n">path</span><span class="o">);</span>
<span class="nc">JavaRDD</span><span class="o">&lt;</span><span class="nc">Vector</span><span class="o">&gt;</span> <span class="n">parsedData</span> <span class="o">=</span> <span class="n">data</span><span class="o">.</span><span class="na">map</span><span class="o">(</span><span class="n">s</span> <span class="o">-&gt;</span> <span class="o">{</span>
<span class="nc">String</span><span class="o">[]</span> <span class="n">sarray</span> <span class="o">=</span> <span class="n">s</span><span class="o">.</span><span class="na">trim</span><span class="o">().</span><span class="na">split</span><span class="o">(</span><span class="s">" "</span><span class="o">);</span>
<span class="kt">double</span><span class="o">[]</span> <span class="n">values</span> <span class="o">=</span> <span class="k">new</span> <span class="kt">double</span><span class="o">[</span><span class="n">sarray</span><span class="o">.</span><span class="na">length</span><span class="o">];</span>
<span class="k">for</span> <span class="o">(</span><span class="kt">int</span> <span class="n">i</span> <span class="o">=</span> <span class="mi">0</span><span class="o">;</span> <span class="n">i</span> <span class="o">&lt;</span> <span class="n">sarray</span><span class="o">.</span><span class="na">length</span><span class="o">;</span> <span class="n">i</span><span class="o">++)</span> <span class="o">{</span>
<span class="n">values</span><span class="o">[</span><span class="n">i</span><span class="o">]</span> <span class="o">=</span> <span class="nc">Double</span><span class="o">.</span><span class="na">parseDouble</span><span class="o">(</span><span class="n">sarray</span><span class="o">[</span><span class="n">i</span><span class="o">]);</span>
<span class="o">}</span>
<span class="k">return</span> <span class="nc">Vectors</span><span class="o">.</span><span class="na">dense</span><span class="o">(</span><span class="n">values</span><span class="o">);</span>
<span class="o">});</span>
<span class="c1">// Index documents with unique IDs</span>
<span class="nc">JavaPairRDD</span><span class="o">&lt;</span><span class="nc">Long</span><span class="o">,</span> <span class="nc">Vector</span><span class="o">&gt;</span> <span class="n">corpus</span> <span class="o">=</span>
<span class="nc">JavaPairRDD</span><span class="o">.</span><span class="na">fromJavaRDD</span><span class="o">(</span><span class="n">parsedData</span><span class="o">.</span><span class="na">zipWithIndex</span><span class="o">().</span><span class="na">map</span><span class="o">(</span><span class="nl">Tuple2:</span><span class="o">:</span><span class="n">swap</span><span class="o">));</span>
<span class="n">corpus</span><span class="o">.</span><span class="na">cache</span><span class="o">();</span>
<span class="c1">// Cluster the documents into three topics using LDA</span>
<span class="nc">LDAModel</span> <span class="n">ldaModel</span> <span class="o">=</span> <span class="k">new</span> <span class="no">LDA</span><span class="o">().</span><span class="na">setK</span><span class="o">(</span><span class="mi">3</span><span class="o">).</span><span class="na">run</span><span class="o">(</span><span class="n">corpus</span><span class="o">);</span>
<span class="c1">// Output topics. Each is a distribution over words (matching word count vectors)</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="s">"Learned topics (as distributions over vocab of "</span> <span class="o">+</span> <span class="n">ldaModel</span><span class="o">.</span><span class="na">vocabSize</span><span class="o">()</span>
<span class="o">+</span> <span class="s">" words):"</span><span class="o">);</span>
<span class="nc">Matrix</span> <span class="n">topics</span> <span class="o">=</span> <span class="n">ldaModel</span><span class="o">.</span><span class="na">topicsMatrix</span><span class="o">();</span>
<span class="k">for</span> <span class="o">(</span><span class="kt">int</span> <span class="n">topic</span> <span class="o">=</span> <span class="mi">0</span><span class="o">;</span> <span class="n">topic</span> <span class="o">&lt;</span> <span class="mi">3</span><span class="o">;</span> <span class="n">topic</span><span class="o">++)</span> <span class="o">{</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">print</span><span class="o">(</span><span class="s">"Topic "</span> <span class="o">+</span> <span class="n">topic</span> <span class="o">+</span> <span class="s">":"</span><span class="o">);</span>
<span class="k">for</span> <span class="o">(</span><span class="kt">int</span> <span class="n">word</span> <span class="o">=</span> <span class="mi">0</span><span class="o">;</span> <span class="n">word</span> <span class="o">&lt;</span> <span class="n">ldaModel</span><span class="o">.</span><span class="na">vocabSize</span><span class="o">();</span> <span class="n">word</span><span class="o">++)</span> <span class="o">{</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">print</span><span class="o">(</span><span class="s">" "</span> <span class="o">+</span> <span class="n">topics</span><span class="o">.</span><span class="na">apply</span><span class="o">(</span><span class="n">word</span><span class="o">,</span> <span class="n">topic</span><span class="o">));</span>
<span class="o">}</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">();</span>
<span class="o">}</span>
<span class="n">ldaModel</span><span class="o">.</span><span class="na">save</span><span class="o">(</span><span class="n">jsc</span><span class="o">.</span><span class="na">sc</span><span class="o">(),</span>
<span class="s">"target/org/apache/spark/JavaLatentDirichletAllocationExample/LDAModel"</span><span class="o">);</span>
<span class="nc">DistributedLDAModel</span> <span class="n">sameModel</span> <span class="o">=</span> <span class="nc">DistributedLDAModel</span><span class="o">.</span><span class="na">load</span><span class="o">(</span><span class="n">jsc</span><span class="o">.</span><span class="na">sc</span><span class="o">(),</span>
<span class="s">"target/org/apache/spark/JavaLatentDirichletAllocationExample/LDAModel"</span><span class="o">);</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/java/org/apache/spark/examples/mllib/JavaLatentDirichletAllocationExample.java" in the Spark repo.</small></div>
</div>
</div>
<h2 id="bisecting-k-means">Bisecting k-means</h2>
<p>Bisecting K-means can often be much faster than regular K-means, but it will generally produce a different clustering.</p>
<p>Bisecting k-means is a kind of <a href="https://en.wikipedia.org/wiki/Hierarchical_clustering">hierarchical clustering</a>.
Hierarchical clustering is one of the most commonly used methods of cluster analysis; it seeks to build a hierarchy of clusters.
Strategies for hierarchical clustering generally fall into two types:</p>
<ul>
<li>Agglomerative: This is a &#8220;bottom up&#8221; approach: each observation starts in its own cluster, and pairs of clusters are merged as one moves up the hierarchy.</li>
<li>Divisive: This is a &#8220;top down&#8221; approach: all observations start in one cluster, and splits are performed recursively as one moves down the hierarchy.</li>
</ul>
<p>The bisecting k-means algorithm is a kind of divisive algorithm.
The implementation in MLlib has the following parameters:</p>
<ul>
<li><em>k</em>: the desired number of leaf clusters (default: 4). The actual number could be smaller if there are no divisible leaf clusters.</li>
<li><em>maxIterations</em>: the max number of k-means iterations to split clusters (default: 20)</li>
<li><em>minDivisibleClusterSize</em>: the minimum number of points (if &gt;= 1.0) or the minimum proportion of points (if &lt; 1.0) of a divisible cluster (default: 1)</li>
<li><em>seed</em>: a random seed (default: hash value of the class name)</li>
</ul>
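<p>A compact sketch of these parameters (values illustrative), assuming <code class="language-plaintext highlighter-rouge">data</code> is an <code class="language-plaintext highlighter-rouge">RDD[Vector]</code>:</p>
<div class="highlight"><pre class="codehilite"><code>import org.apache.spark.mllib.clustering.BisectingKMeans

val bkm = new BisectingKMeans()
  .setK(4)                         // desired number of leaf clusters
  .setMaxIterations(20)            // k-means iterations per split
  .setMinDivisibleClusterSize(1.0) // count if &gt;= 1.0, fraction if &lt; 1.0
  .setSeed(42L)
val model = bkm.run(data)</code></pre></div>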
<p><strong>Examples</strong></p>
<div class="codetabs">
<div data-lang="python">
<p>Refer to the <a href="api/python/reference/api/pyspark.mllib.clustering.BisectingKMeans.html"><code class="language-plaintext highlighter-rouge">BisectingKMeans</code> Python docs</a> and <a href="api/python/reference/api/pyspark.mllib.clustering.BisectingKMeansModel.html"><code class="language-plaintext highlighter-rouge">BisectingKMeansModel</code> Python docs</a> for more details on the API.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">from</span> <span class="nn">numpy</span> <span class="kn">import</span> <span class="n">array</span>
<span class="kn">from</span> <span class="nn">pyspark.mllib.clustering</span> <span class="kn">import</span> <span class="n">BisectingKMeans</span>
<span class="c1"># Load and parse the data
</span><span class="n">data</span> <span class="o">=</span> <span class="n">sc</span><span class="p">.</span><span class="n">textFile</span><span class="p">(</span><span class="s">"data/mllib/kmeans_data.txt"</span><span class="p">)</span>
<span class="n">parsedData</span> <span class="o">=</span> <span class="n">data</span><span class="p">.</span><span class="nb">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">line</span><span class="p">:</span> <span class="n">array</span><span class="p">([</span><span class="nb">float</span><span class="p">(</span><span class="n">x</span><span class="p">)</span> <span class="k">for</span> <span class="n">x</span> <span class="ow">in</span> <span class="n">line</span><span class="p">.</span><span class="n">split</span><span class="p">(</span><span class="s">' '</span><span class="p">)]))</span>
<span class="c1"># Build the model (cluster the data)
</span><span class="n">model</span> <span class="o">=</span> <span class="n">BisectingKMeans</span><span class="p">.</span><span class="n">train</span><span class="p">(</span><span class="n">parsedData</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="n">maxIterations</span><span class="o">=</span><span class="mi">5</span><span class="p">)</span>
<span class="c1"># Evaluate clustering
</span><span class="n">cost</span> <span class="o">=</span> <span class="n">model</span><span class="p">.</span><span class="n">computeCost</span><span class="p">(</span><span class="n">parsedData</span><span class="p">)</span>
<span class="k">print</span><span class="p">(</span><span class="s">"Bisecting K-means Cost = "</span> <span class="o">+</span> <span class="nb">str</span><span class="p">(</span><span class="n">cost</span><span class="p">))</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/python/mllib/bisecting_k_means_example.py" in the Spark repo.</small></div>
</div>
<div data-lang="scala">
<p>Refer to the <a href="api/scala/org/apache/spark/mllib/clustering/BisectingKMeans.html"><code class="language-plaintext highlighter-rouge">BisectingKMeans</code> Scala docs</a> and <a href="api/scala/org/apache/spark/mllib/clustering/BisectingKMeansModel.html"><code class="language-plaintext highlighter-rouge">BisectingKMeansModel</code> Scala docs</a> for details on the API.</p>
<div class="highlight"><pre class="codehilite"><code><span class="k">import</span> <span class="nn">org.apache.spark.mllib.clustering.BisectingKMeans</span>
<span class="k">import</span> <span class="nn">org.apache.spark.mllib.linalg.</span><span class="o">{</span><span class="nc">Vector</span><span class="o">,</span> <span class="nc">Vectors</span><span class="o">}</span>
<span class="c1">// Loads and parses data</span>
<span class="k">def</span> <span class="nf">parse</span><span class="o">(</span><span class="n">line</span><span class="k">:</span> <span class="kt">String</span><span class="o">)</span><span class="k">:</span> <span class="kt">Vector</span> <span class="o">=</span> <span class="nv">Vectors</span><span class="o">.</span><span class="py">dense</span><span class="o">(</span><span class="nv">line</span><span class="o">.</span><span class="py">split</span><span class="o">(</span><span class="s">" "</span><span class="o">).</span><span class="py">map</span><span class="o">(</span><span class="nv">_</span><span class="o">.</span><span class="py">toDouble</span><span class="o">))</span>
<span class="k">val</span> <span class="nv">data</span> <span class="k">=</span> <span class="nv">sc</span><span class="o">.</span><span class="py">textFile</span><span class="o">(</span><span class="s">"data/mllib/kmeans_data.txt"</span><span class="o">).</span><span class="py">map</span><span class="o">(</span><span class="n">parse</span><span class="o">).</span><span class="py">cache</span><span class="o">()</span>
<span class="c1">// Clustering the data into 6 clusters by BisectingKMeans.</span>
<span class="k">val</span> <span class="nv">bkm</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">BisectingKMeans</span><span class="o">().</span><span class="py">setK</span><span class="o">(</span><span class="mi">6</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">model</span> <span class="k">=</span> <span class="nv">bkm</span><span class="o">.</span><span class="py">run</span><span class="o">(</span><span class="n">data</span><span class="o">)</span>
<span class="c1">// Show the compute cost and the cluster centers</span>
<span class="nf">println</span><span class="o">(</span><span class="n">s</span><span class="s">"Compute Cost: ${model.computeCost(data)}"</span><span class="o">)</span>
<span class="nv">model</span><span class="o">.</span><span class="py">clusterCenters</span><span class="o">.</span><span class="py">zipWithIndex</span><span class="o">.</span><span class="py">foreach</span> <span class="o">{</span> <span class="nf">case</span> <span class="o">(</span><span class="n">center</span><span class="o">,</span> <span class="n">idx</span><span class="o">)</span> <span class="k">=&gt;</span>
<span class="nf">println</span><span class="o">(</span><span class="n">s</span><span class="s">"Cluster Center ${idx}: ${center}"</span><span class="o">)</span>
<span class="o">}</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/scala/org/apache/spark/examples/mllib/BisectingKMeansExample.scala" in the Spark repo.</small></div>
</div>
<div data-lang="java">
<p>Refer to the <a href="api/java/org/apache/spark/mllib/clustering/BisectingKMeans.html"><code class="language-plaintext highlighter-rouge">BisectingKMeans</code> Java docs</a> and <a href="api/java/org/apache/spark/mllib/clustering/BisectingKMeansModel.html"><code class="language-plaintext highlighter-rouge">BisectingKMeansModel</code> Java docs</a> for details on the API.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">import</span> <span class="nn">java.util.Arrays</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">java.util.List</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.api.java.JavaRDD</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.mllib.clustering.BisectingKMeans</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.mllib.clustering.BisectingKMeansModel</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.mllib.linalg.Vector</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.mllib.linalg.Vectors</span><span class="o">;</span>
<span class="nc">List</span><span class="o">&lt;</span><span class="nc">Vector</span><span class="o">&gt;</span> <span class="n">localData</span> <span class="o">=</span> <span class="nc">Arrays</span><span class="o">.</span><span class="na">asList</span><span class="o">(</span>
<span class="nc">Vectors</span><span class="o">.</span><span class="na">dense</span><span class="o">(</span><span class="mf">0.1</span><span class="o">,</span> <span class="mf">0.1</span><span class="o">),</span> <span class="nc">Vectors</span><span class="o">.</span><span class="na">dense</span><span class="o">(</span><span class="mf">0.3</span><span class="o">,</span> <span class="mf">0.3</span><span class="o">),</span>
<span class="nc">Vectors</span><span class="o">.</span><span class="na">dense</span><span class="o">(</span><span class="mf">10.1</span><span class="o">,</span> <span class="mf">10.1</span><span class="o">),</span> <span class="nc">Vectors</span><span class="o">.</span><span class="na">dense</span><span class="o">(</span><span class="mf">10.3</span><span class="o">,</span> <span class="mf">10.3</span><span class="o">),</span>
<span class="nc">Vectors</span><span class="o">.</span><span class="na">dense</span><span class="o">(</span><span class="mf">20.1</span><span class="o">,</span> <span class="mf">20.1</span><span class="o">),</span> <span class="nc">Vectors</span><span class="o">.</span><span class="na">dense</span><span class="o">(</span><span class="mf">20.3</span><span class="o">,</span> <span class="mf">20.3</span><span class="o">),</span>
<span class="nc">Vectors</span><span class="o">.</span><span class="na">dense</span><span class="o">(</span><span class="mf">30.1</span><span class="o">,</span> <span class="mf">30.1</span><span class="o">),</span> <span class="nc">Vectors</span><span class="o">.</span><span class="na">dense</span><span class="o">(</span><span class="mf">30.3</span><span class="o">,</span> <span class="mf">30.3</span><span class="o">)</span>
<span class="o">);</span>
<span class="nc">JavaRDD</span><span class="o">&lt;</span><span class="nc">Vector</span><span class="o">&gt;</span> <span class="n">data</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="na">parallelize</span><span class="o">(</span><span class="n">localData</span><span class="o">,</span> <span class="mi">2</span><span class="o">);</span>
<span class="nc">BisectingKMeans</span> <span class="n">bkm</span> <span class="o">=</span> <span class="k">new</span> <span class="nc">BisectingKMeans</span><span class="o">()</span>
<span class="o">.</span><span class="na">setK</span><span class="o">(</span><span class="mi">4</span><span class="o">);</span>
<span class="nc">BisectingKMeansModel</span> <span class="n">model</span> <span class="o">=</span> <span class="n">bkm</span><span class="o">.</span><span class="na">run</span><span class="o">(</span><span class="n">data</span><span class="o">);</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="s">"Compute Cost: "</span> <span class="o">+</span> <span class="n">model</span><span class="o">.</span><span class="na">computeCost</span><span class="o">(</span><span class="n">data</span><span class="o">));</span>
<span class="nc">Vector</span><span class="o">[]</span> <span class="n">clusterCenters</span> <span class="o">=</span> <span class="n">model</span><span class="o">.</span><span class="na">clusterCenters</span><span class="o">();</span>
<span class="k">for</span> <span class="o">(</span><span class="kt">int</span> <span class="n">i</span> <span class="o">=</span> <span class="mi">0</span><span class="o">;</span> <span class="n">i</span> <span class="o">&lt;</span> <span class="n">clusterCenters</span><span class="o">.</span><span class="na">length</span><span class="o">;</span> <span class="n">i</span><span class="o">++)</span> <span class="o">{</span>
<span class="nc">Vector</span> <span class="n">clusterCenter</span> <span class="o">=</span> <span class="n">clusterCenters</span><span class="o">[</span><span class="n">i</span><span class="o">];</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="s">"Cluster Center "</span> <span class="o">+</span> <span class="n">i</span> <span class="o">+</span> <span class="s">": "</span> <span class="o">+</span> <span class="n">clusterCenter</span><span class="o">);</span>
<span class="o">}</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/java/org/apache/spark/examples/mllib/JavaBisectingKMeansExample.java" in the Spark repo.</small></div>
</div>
</div>
<h2 id="streaming-k-means">Streaming k-means</h2>
<p>When data arrive in a stream, we may want to estimate clusters dynamically,
updating them as new data arrive. <code class="language-plaintext highlighter-rouge">spark.mllib</code> provides support for streaming k-means clustering,
with parameters to control the decay (or &#8220;forgetfulness&#8221;) of the estimates. The algorithm
uses a generalization of the mini-batch k-means update rule. For each batch of data, we assign
all points to their nearest cluster, compute new cluster centers, then update each cluster using:</p>
<p><code class="language-plaintext highlighter-rouge">\begin{equation}
c_{t+1} = \frac{c_tn_t\alpha + x_tm_t}{n_t\alpha+m_t}
\end{equation}</code>
<code class="language-plaintext highlighter-rouge">\begin{equation}
n_{t+1} = n_t + m_t
\end{equation}</code></p>
<p>Here <code class="language-plaintext highlighter-rouge">$c_t$</code> is the previous center for the cluster, <code class="language-plaintext highlighter-rouge">$n_t$</code> is the number of points assigned
to the cluster thus far, <code class="language-plaintext highlighter-rouge">$x_t$</code> is the new cluster center from the current batch, and <code class="language-plaintext highlighter-rouge">$m_t$</code>
is the number of points added to the cluster in the current batch. The decay factor <code class="language-plaintext highlighter-rouge">$\alpha$</code>
can be used to ignore the past: with <code class="language-plaintext highlighter-rouge">$\alpha$=1</code> all data will be used from the beginning;
with <code class="language-plaintext highlighter-rouge">$\alpha$=0</code> only the most recent data will be used. This is analogous to an
exponentially-weighted moving average.</p>
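<p>As a worked instance of the update rule (plain Scala, no Spark needed): take a previous center <code class="language-plaintext highlighter-rouge">$c_t = 1.0$</code> over <code class="language-plaintext highlighter-rouge">$n_t = 10$</code> points, a batch center <code class="language-plaintext highlighter-rouge">$x_t = 3.0$</code> from <code class="language-plaintext highlighter-rouge">$m_t = 5$</code> points, and <code class="language-plaintext highlighter-rouge">$\alpha = 0.5$</code>:</p>
<div class="highlight"><pre class="codehilite"><code>val (ct, nt, xt, mt, alpha) = (1.0, 10.0, 3.0, 5.0, 0.5)
val ctNext = (ct * nt * alpha + xt * mt) / (nt * alpha + mt) // (5 + 15) / (5 + 5) = 2.0
val ntNext = nt + mt                                         // 15.0</code></pre></div>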
<p>The decay can be specified using a <code class="language-plaintext highlighter-rouge">halfLife</code> parameter, which determines the
correct decay factor <code class="language-plaintext highlighter-rouge">a</code> such that, for data acquired
at time <code class="language-plaintext highlighter-rouge">t</code>, its contribution by time <code class="language-plaintext highlighter-rouge">t + halfLife</code> will have dropped to 0.5.
The unit of time can be specified either as <code class="language-plaintext highlighter-rouge">batches</code> or <code class="language-plaintext highlighter-rouge">points</code> and the update rule
will be adjusted accordingly.</p>
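<p>In Scala, the half-life form can be set directly on the estimator; with half-life <code class="language-plaintext highlighter-rouge">h</code>, the implied factor satisfies <code class="language-plaintext highlighter-rouge">$a^h = 0.5$</code>, i.e. <code class="language-plaintext highlighter-rouge">$a = 0.5^{1/h}$</code>. A sketch:</p>
<div class="highlight"><pre class="codehilite"><code>import org.apache.spark.mllib.clustering.StreamingKMeans

val skm = new StreamingKMeans()
  .setK(2)
  .setHalfLife(5.0, "batches")  // time unit: "batches" or "points"
  .setRandomCenters(3, 1.0, 0L) // dimension, initial weight, seed</code></pre></div>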
<p><strong>Examples</strong></p>
<p>This example shows how to estimate clusters on streaming data.</p>
<div class="codetabs">
<div data-lang="python">
<p>Refer to the <a href="api/python/reference/api/pyspark.mllib.clustering.StreamingKMeans.html"><code class="language-plaintext highlighter-rouge">StreamingKMeans</code> Python docs</a> for more details on the API.
And Refer to <a href="streaming-programming-guide.html#initializing">Spark Streaming Programming Guide</a> for details on StreamingContext.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">from</span> <span class="nn">pyspark.mllib.linalg</span> <span class="kn">import</span> <span class="n">Vectors</span>
<span class="kn">from</span> <span class="nn">pyspark.mllib.regression</span> <span class="kn">import</span> <span class="n">LabeledPoint</span>
<span class="kn">from</span> <span class="nn">pyspark.mllib.clustering</span> <span class="kn">import</span> <span class="n">StreamingKMeans</span>
<span class="c1"># we make an input stream of vectors for training,
# as well as a stream of vectors for testing
</span><span class="k">def</span> <span class="nf">parse</span><span class="p">(</span><span class="n">lp</span><span class="p">):</span>
<span class="n">label</span> <span class="o">=</span> <span class="nb">float</span><span class="p">(</span><span class="n">lp</span><span class="p">[</span><span class="n">lp</span><span class="p">.</span><span class="n">find</span><span class="p">(</span><span class="s">'('</span><span class="p">)</span> <span class="o">+</span> <span class="mi">1</span><span class="p">:</span> <span class="n">lp</span><span class="p">.</span><span class="n">find</span><span class="p">(</span><span class="s">')'</span><span class="p">)])</span>
<span class="n">vec</span> <span class="o">=</span> <span class="n">Vectors</span><span class="p">.</span><span class="n">dense</span><span class="p">(</span><span class="n">lp</span><span class="p">[</span><span class="n">lp</span><span class="p">.</span><span class="n">find</span><span class="p">(</span><span class="s">'['</span><span class="p">)</span> <span class="o">+</span> <span class="mi">1</span><span class="p">:</span> <span class="n">lp</span><span class="p">.</span><span class="n">find</span><span class="p">(</span><span class="s">']'</span><span class="p">)].</span><span class="n">split</span><span class="p">(</span><span class="s">','</span><span class="p">))</span>
<span class="k">return</span> <span class="n">LabeledPoint</span><span class="p">(</span><span class="n">label</span><span class="p">,</span> <span class="n">vec</span><span class="p">)</span>
<span class="n">trainingData</span> <span class="o">=</span> <span class="n">sc</span><span class="p">.</span><span class="n">textFile</span><span class="p">(</span><span class="s">"data/mllib/kmeans_data.txt"</span><span class="p">)</span>\
<span class="p">.</span><span class="nb">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">line</span><span class="p">:</span> <span class="n">Vectors</span><span class="p">.</span><span class="n">dense</span><span class="p">([</span><span class="nb">float</span><span class="p">(</span><span class="n">x</span><span class="p">)</span> <span class="k">for</span> <span class="n">x</span> <span class="ow">in</span> <span class="n">line</span><span class="p">.</span><span class="n">strip</span><span class="p">().</span><span class="n">split</span><span class="p">(</span><span class="s">' '</span><span class="p">)]))</span>
<span class="n">testingData</span> <span class="o">=</span> <span class="n">sc</span><span class="p">.</span><span class="n">textFile</span><span class="p">(</span><span class="s">"data/mllib/streaming_kmeans_data_test.txt"</span><span class="p">).</span><span class="nb">map</span><span class="p">(</span><span class="n">parse</span><span class="p">)</span>
<span class="n">trainingQueue</span> <span class="o">=</span> <span class="p">[</span><span class="n">trainingData</span><span class="p">]</span>
<span class="n">testingQueue</span> <span class="o">=</span> <span class="p">[</span><span class="n">testingData</span><span class="p">]</span>
<span class="n">trainingStream</span> <span class="o">=</span> <span class="n">ssc</span><span class="p">.</span><span class="n">queueStream</span><span class="p">(</span><span class="n">trainingQueue</span><span class="p">)</span>
<span class="n">testingStream</span> <span class="o">=</span> <span class="n">ssc</span><span class="p">.</span><span class="n">queueStream</span><span class="p">(</span><span class="n">testingQueue</span><span class="p">)</span>
<span class="c1"># We create a model with random clusters and specify the number of clusters to find
</span><span class="n">model</span> <span class="o">=</span> <span class="n">StreamingKMeans</span><span class="p">(</span><span class="n">k</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">decayFactor</span><span class="o">=</span><span class="mf">1.0</span><span class="p">).</span><span class="n">setRandomCenters</span><span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">,</span> <span class="mi">0</span><span class="p">)</span>
<span class="c1"># Now register the streams for training and testing and start the job,
# printing the predicted cluster assignments on new data points as they arrive.
</span><span class="n">model</span><span class="p">.</span><span class="n">trainOn</span><span class="p">(</span><span class="n">trainingStream</span><span class="p">)</span>
<span class="n">result</span> <span class="o">=</span> <span class="n">model</span><span class="p">.</span><span class="n">predictOnValues</span><span class="p">(</span><span class="n">testingStream</span><span class="p">.</span><span class="nb">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">lp</span><span class="p">:</span> <span class="p">(</span><span class="n">lp</span><span class="p">.</span><span class="n">label</span><span class="p">,</span> <span class="n">lp</span><span class="p">.</span><span class="n">features</span><span class="p">)))</span>
<span class="n">result</span><span class="p">.</span><span class="n">pprint</span><span class="p">()</span>
<span class="n">ssc</span><span class="p">.</span><span class="n">start</span><span class="p">()</span>
<span class="n">ssc</span><span class="p">.</span><span class="n">stop</span><span class="p">(</span><span class="n">stopSparkContext</span><span class="o">=</span><span class="bp">True</span><span class="p">,</span> <span class="n">stopGraceFully</span><span class="o">=</span><span class="bp">True</span><span class="p">)</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/python/mllib/streaming_k_means_example.py" in the Spark repo.</small></div>
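<p>For reference, the <code class="language-plaintext highlighter-rouge">parse</code> helper above expects test lines shaped like <code class="language-plaintext highlighter-rouge">(y), [x1, x2, x3]</code>, with the closing parenthesis immediately after the label (the label is taken from between the first <code class="language-plaintext highlighter-rouge">(</code> and the first <code class="language-plaintext highlighter-rouge">)</code>). A quick, illustrative sanity check, not part of the original example:</p>
<div class="highlight"><pre class="codehilite"><code># Illustrative only: check the parse() helper on one sample line.
point = parse("(1.0), [1.7, 0.4, 0.9]")
print(point.label)     # 1.0
print(point.features)  # [1.7,0.4,0.9]</code></pre></div>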
</div>
<div data-lang="scala">
<p>Refer to the <a href="api/scala/org/apache/spark/mllib/clustering/StreamingKMeans.html"><code class="language-plaintext highlighter-rouge">StreamingKMeans</code> Scala docs</a> for details on the API.
Also refer to the <a href="streaming-programming-guide.html#initializing">Spark Streaming Programming Guide</a> for details on StreamingContext.</p>
<div class="highlight"><pre class="codehilite"><code><span class="k">import</span> <span class="nn">org.apache.spark.mllib.clustering.StreamingKMeans</span>
<span class="k">import</span> <span class="nn">org.apache.spark.mllib.linalg.Vectors</span>
<span class="k">import</span> <span class="nn">org.apache.spark.mllib.regression.LabeledPoint</span>
<span class="k">import</span> <span class="nn">org.apache.spark.streaming.</span><span class="o">{</span><span class="nc">Seconds</span><span class="o">,</span> <span class="nc">StreamingContext</span><span class="o">}</span>
<span class="k">val</span> <span class="nv">conf</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">SparkConf</span><span class="o">().</span><span class="py">setAppName</span><span class="o">(</span><span class="s">"StreamingKMeansExample"</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">ssc</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">StreamingContext</span><span class="o">(</span><span class="n">conf</span><span class="o">,</span> <span class="nc">Seconds</span><span class="o">(</span><span class="nf">args</span><span class="o">(</span><span class="mi">2</span><span class="o">).</span><span class="py">toLong</span><span class="o">))</span>
<span class="k">val</span> <span class="nv">trainingData</span> <span class="k">=</span> <span class="nv">ssc</span><span class="o">.</span><span class="py">textFileStream</span><span class="o">(</span><span class="nf">args</span><span class="o">(</span><span class="mi">0</span><span class="o">)).</span><span class="py">map</span><span class="o">(</span><span class="nv">Vectors</span><span class="o">.</span><span class="py">parse</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">testData</span> <span class="k">=</span> <span class="nv">ssc</span><span class="o">.</span><span class="py">textFileStream</span><span class="o">(</span><span class="nf">args</span><span class="o">(</span><span class="mi">1</span><span class="o">)).</span><span class="py">map</span><span class="o">(</span><span class="nv">LabeledPoint</span><span class="o">.</span><span class="py">parse</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">model</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">StreamingKMeans</span><span class="o">()</span>
<span class="o">.</span><span class="py">setK</span><span class="o">(</span><span class="nf">args</span><span class="o">(</span><span class="mi">3</span><span class="o">).</span><span class="py">toInt</span><span class="o">)</span>
<span class="o">.</span><span class="py">setDecayFactor</span><span class="o">(</span><span class="mf">1.0</span><span class="o">)</span>
<span class="o">.</span><span class="py">setRandomCenters</span><span class="o">(</span><span class="nf">args</span><span class="o">(</span><span class="mi">4</span><span class="o">).</span><span class="py">toInt</span><span class="o">,</span> <span class="mf">0.0</span><span class="o">)</span>
<span class="nv">model</span><span class="o">.</span><span class="py">trainOn</span><span class="o">(</span><span class="n">trainingData</span><span class="o">)</span>
<span class="nv">model</span><span class="o">.</span><span class="py">predictOnValues</span><span class="o">(</span><span class="nv">testData</span><span class="o">.</span><span class="py">map</span><span class="o">(</span><span class="n">lp</span> <span class="k">=&gt;</span> <span class="o">(</span><span class="nv">lp</span><span class="o">.</span><span class="py">label</span><span class="o">,</span> <span class="nv">lp</span><span class="o">.</span><span class="py">features</span><span class="o">))).</span><span class="py">print</span><span class="o">()</span>
<span class="nv">ssc</span><span class="o">.</span><span class="py">start</span><span class="o">()</span>
<span class="nv">ssc</span><span class="o">.</span><span class="py">awaitTermination</span><span class="o">()</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/scala/org/apache/spark/examples/mllib/StreamingKMeansExample.scala" in the Spark repo.</small></div>
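<p>Note that this version of the example reads its settings from the command line: <code class="language-plaintext highlighter-rouge">args(0)</code> and <code class="language-plaintext highlighter-rouge">args(1)</code> are the training and test data directories, <code class="language-plaintext highlighter-rouge">args(2)</code> is the batch duration in seconds, <code class="language-plaintext highlighter-rouge">args(3)</code> is the number of clusters, and <code class="language-plaintext highlighter-rouge">args(4)</code> is the number of dimensions of each point.</p>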
</div>
</div>
<p>As you add new text files with data the cluster centers will update. Each training
point should be formatted as <code class="language-plaintext highlighter-rouge">[x1, x2, x3]</code>, and each test data point
should be formatted as <code class="language-plaintext highlighter-rouge">(y, [x1, x2, x3])</code>, where <code class="language-plaintext highlighter-rouge">y</code> is some useful label or identifier
(e.g. a true category assignment). Anytime a text file is placed in <code class="language-plaintext highlighter-rouge">/training/data/dir</code>
the model will update. Anytime a text file is placed in <code class="language-plaintext highlighter-rouge">/testing/data/dir</code>
you will see predictions. With new data, the cluster centers will change!</p>
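<p>As a concrete illustration, here is a minimal sketch (with hypothetical file names and the directory paths from the text above; substitute the directories your streams actually watch) that writes one batch of training data and one batch of test data in the expected formats:</p>
<div class="highlight"><pre class="codehilite"><code>import os

# Hypothetical watched directories; these must match the paths passed to
# ssc.textFileStream(...) in the example above.
os.makedirs("/training/data/dir", exist_ok=True)
os.makedirs("/testing/data/dir", exist_ok=True)

# Training points: one "[x1, x2, x3]" vector per line.
with open("/training/data/dir/batch-0.txt", "w") as f:
    f.write("[1.0, 0.5, 0.2]\n")
    f.write("[8.0, 9.1, 7.6]\n")

# Test points: one "(y, [x1, x2, x3])" labeled point per line.
with open("/testing/data/dir/batch-0.txt", "w") as f:
    f.write("(0.0, [1.1, 0.4, 0.3])\n")
    f.write("(1.0, [7.9, 9.0, 7.7])\n")</code></pre></div>
<p>Keep in mind that <code class="language-plaintext highlighter-rouge">textFileStream</code> only picks up files created in (or moved into) the watched directory after the streaming context has started.</p>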
</div>
<!-- /container -->
</div>
<script src="js/vendor/jquery-3.5.1.min.js"></script>
<script src="js/vendor/bootstrap.bundle.min.js"></script>
<script src="js/vendor/anchor.min.js"></script>
<script src="js/main.js"></script>
<script type="text/javascript" src="js/vendor/docsearch.min.js"></script>
<script type="text/javascript">
// DocSearch is entirely free and automated. DocSearch is built in two parts:
// 1. a crawler which we run on our own infrastructure every 24 hours. It follows every link
// in your website and extract content from every page it traverses. It then pushes this
// content to an Algolia index.
// 2. a JavaScript snippet to be inserted in your website that will bind this Algolia index
// to your search input and display its results in a dropdown UI. If you want to find more
// details on how works DocSearch, check the docs of DocSearch.
docsearch({
apiKey: 'd62f962a82bc9abb53471cb7b89da35e',
appId: 'RAI69RXRSK',
indexName: 'apache_spark',
inputSelector: '#docsearch-input',
enhancedSearchInput: true,
algoliaOptions: {
'facetFilters': ["version:3.5.0"]
},
debug: false // Set debug to true if you want to inspect the dropdown
});
</script>
<!-- MathJax Section -->
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
TeX: { equationNumbers: { autoNumber: "AMS" } }
});
</script>
<script>
// Note that we load MathJax this way to work with local file (file://), HTTP and HTTPS.
// We could use "//cdn.mathjax...", but that won't support "file://".
(function(d, script) {
script = d.createElement('script');
script.type = 'text/javascript';
script.async = true;
script.onload = function(){
MathJax.Hub.Config({
tex2jax: {
inlineMath: [ ["$", "$"], ["\\\\(","\\\\)"] ],
displayMath: [ ["$$","$$"], ["\\[", "\\]"] ],
processEscapes: true,
skipTags: ['script', 'noscript', 'style', 'textarea', 'pre']
}
});
};
script.src = ('https:' == document.location.protocol ? 'https://' : 'http://') +
'cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js' +
'?config=TeX-AMS-MML_HTMLorMML';
d.getElementsByTagName('head')[0].appendChild(script);
}(document));
</script>
</body>
</html>