<!DOCTYPE html>
<!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7"> <![endif]-->
<!--[if IE 7]> <html class="no-js lt-ie9 lt-ie8"> <![endif]-->
<!--[if IE 8]> <html class="no-js lt-ie9"> <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js"> <!--<![endif]-->
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Clustering - Spark 3.5.0 Documentation</title>
<link rel="stylesheet" href="css/bootstrap.min.css">
<link rel="preconnect" href="https://fonts.googleapis.com">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link href="https://fonts.googleapis.com/css2?family=DM+Sans:ital,wght@0,400;0,500;0,700;1,400;1,500;1,700&Courier+Prime:wght@400;700&display=swap" rel="stylesheet">
<link href="css/custom.css" rel="stylesheet">
<script src="js/vendor/modernizr-2.6.1-respond-1.1.0.min.js"></script>
<link rel="stylesheet" href="css/pygments-default.css">
<link rel="stylesheet" href="css/docsearch.min.css" />
<link rel="stylesheet" href="css/docsearch.css">
<!-- Matomo -->
<script type="text/javascript">
var _paq = window._paq = window._paq || [];
/* tracker methods like "setCustomDimension" should be called before "trackPageView" */
_paq.push(["disableCookies"]);
_paq.push(['trackPageView']);
_paq.push(['enableLinkTracking']);
(function() {
var u="https://analytics.apache.org/";
_paq.push(['setTrackerUrl', u+'matomo.php']);
_paq.push(['setSiteId', '40']);
var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
g.async=true; g.src=u+'matomo.js'; s.parentNode.insertBefore(g,s);
})();
</script>
<!-- End Matomo Code -->
</head>
<body class="global">
<!--[if lt IE 7]>
<p class="chromeframe">You are using an outdated browser. <a href="https://browsehappy.com/">Upgrade your browser today</a> or <a href="http://www.google.com/chromeframe/?redirect=true">install Google Chrome Frame</a> to better experience this site.</p>
<![endif]-->
<!-- This code is taken from http://twitter.github.com/bootstrap/examples/hero.html -->
<nav class="navbar navbar-expand-lg navbar-dark p-0 px-4 fixed-top" style="background: #1d6890;" id="topbar">
<div class="navbar-brand"><a href="index.html">
<img src="img/spark-logo-rev.svg" width="141" height="72"/></a><span class="version">3.5.0</span>
</div>
<button class="navbar-toggler" type="button" data-toggle="collapse"
data-target="#navbarCollapse" aria-controls="navbarCollapse"
aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarCollapse">
<ul class="navbar-nav me-auto">
<li class="nav-item"><a href="index.html" class="nav-link">Overview</a></li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarQuickStart" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Programming Guides</a>
<div class="dropdown-menu" aria-labelledby="navbarQuickStart">
<a class="dropdown-item" href="quick-start.html">Quick Start</a>
<a class="dropdown-item" href="rdd-programming-guide.html">RDDs, Accumulators, Broadcasts Vars</a>
<a class="dropdown-item" href="sql-programming-guide.html">SQL, DataFrames, and Datasets</a>
<a class="dropdown-item" href="structured-streaming-programming-guide.html">Structured Streaming</a>
<a class="dropdown-item" href="streaming-programming-guide.html">Spark Streaming (DStreams)</a>
<a class="dropdown-item" href="ml-guide.html">MLlib (Machine Learning)</a>
<a class="dropdown-item" href="graphx-programming-guide.html">GraphX (Graph Processing)</a>
<a class="dropdown-item" href="sparkr.html">SparkR (R on Spark)</a>
<a class="dropdown-item" href="api/python/getting_started/index.html">PySpark (Python on Spark)</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarAPIDocs" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">API Docs</a>
<div class="dropdown-menu" aria-labelledby="navbarAPIDocs">
<a class="dropdown-item" href="api/scala/org/apache/spark/index.html">Scala</a>
<a class="dropdown-item" href="api/java/index.html">Java</a>
<a class="dropdown-item" href="api/python/index.html">Python</a>
<a class="dropdown-item" href="api/R/index.html">R</a>
<a class="dropdown-item" href="api/sql/index.html">SQL, Built-in Functions</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarDeploying" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Deploying</a>
<div class="dropdown-menu" aria-labelledby="navbarDeploying">
<a class="dropdown-item" href="cluster-overview.html">Overview</a>
<a class="dropdown-item" href="submitting-applications.html">Submitting Applications</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="spark-standalone.html">Spark Standalone</a>
<a class="dropdown-item" href="running-on-mesos.html">Mesos</a>
<a class="dropdown-item" href="running-on-yarn.html">YARN</a>
<a class="dropdown-item" href="running-on-kubernetes.html">Kubernetes</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarMore" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">More</a>
<div class="dropdown-menu" aria-labelledby="navbarMore">
<a class="dropdown-item" href="configuration.html">Configuration</a>
<a class="dropdown-item" href="monitoring.html">Monitoring</a>
<a class="dropdown-item" href="tuning.html">Tuning Guide</a>
<a class="dropdown-item" href="job-scheduling.html">Job Scheduling</a>
<a class="dropdown-item" href="security.html">Security</a>
<a class="dropdown-item" href="hardware-provisioning.html">Hardware Provisioning</a>
<a class="dropdown-item" href="migration-guide.html">Migration Guide</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="building-spark.html">Building Spark</a>
<a class="dropdown-item" href="https://spark.apache.org/contributing.html">Contributing to Spark</a>
<a class="dropdown-item" href="https://spark.apache.org/third-party-projects.html">Third Party Projects</a>
</div>
</li>
<li class="nav-item">
<input type="text" id="docsearch-input" placeholder="Search the docs…">
</li>
</ul>
<!--<span class="navbar-text navbar-right"><span class="version-text">v3.5.0</span></span>-->
</div>
</nav>
<div class="container">
<div class="left-menu-wrapper">
<div class="left-menu">
<h3><a href="ml-guide.html">MLlib: Main Guide</a></h3>
<ul>
<li>
<a href="ml-statistics.html">
Basic statistics
</a>
</li>
<li>
<a href="ml-datasource.html">
Data sources
</a>
</li>
<li>
<a href="ml-pipeline.html">
Pipelines
</a>
</li>
<li>
<a href="ml-features.html">
Extracting, transforming and selecting features
</a>
</li>
<li>
<a href="ml-classification-regression.html">
Classification and Regression
</a>
</li>
<li>
<a href="ml-clustering.html">
Clustering
</a>
</li>
<li>
<a href="ml-collaborative-filtering.html">
Collaborative filtering
</a>
</li>
<li>
<a href="ml-frequent-pattern-mining.html">
Frequent Pattern Mining
</a>
</li>
<li>
<a href="ml-tuning.html">
Model selection and tuning
</a>
</li>
<li>
<a href="ml-advanced.html">
Advanced topics
</a>
</li>
</ul>
<h3><a href="mllib-guide.html">MLlib: RDD-based API Guide</a></h3>
<ul>
<li>
<a href="mllib-data-types.html">
Data types
</a>
</li>
<li>
<a href="mllib-statistics.html">
Basic statistics
</a>
</li>
<li>
<a href="mllib-classification-regression.html">
Classification and regression
</a>
</li>
<li>
<a href="mllib-collaborative-filtering.html">
Collaborative filtering
</a>
</li>
<li>
<a href="mllib-clustering.html">
Clustering
</a>
</li>
<li>
<a href="mllib-dimensionality-reduction.html">
Dimensionality reduction
</a>
</li>
<li>
<a href="mllib-feature-extraction.html">
Feature extraction and transformation
</a>
</li>
<li>
<a href="mllib-frequent-pattern-mining.html">
Frequent pattern mining
</a>
</li>
<li>
<a href="mllib-evaluation-metrics.html">
Evaluation metrics
</a>
</li>
<li>
<a href="mllib-pmml-model-export.html">
PMML model export
</a>
</li>
<li>
<a href="mllib-optimization.html">
Optimization (developer)
</a>
</li>
</ul>
</div>
</div>
<input id="nav-trigger" class="nav-trigger" checked type="checkbox">
<label for="nav-trigger"></label>
<div class="content-with-sidebar mr-3" id="content">
<h1 class="title">Clustering</h1>
<p>This page describes clustering algorithms in MLlib.
The <a href="mllib-clustering.html">guide for clustering in the RDD-based API</a> also has relevant information
about these algorithms.</p>
<p><strong>Table of Contents</strong></p>
<ul id="markdown-toc">
<li><a href="#k-means" id="markdown-toc-k-means">K-means</a> <ul>
<li><a href="#input-columns" id="markdown-toc-input-columns">Input Columns</a></li>
<li><a href="#output-columns" id="markdown-toc-output-columns">Output Columns</a></li>
</ul>
</li>
<li><a href="#latent-dirichlet-allocation-lda" id="markdown-toc-latent-dirichlet-allocation-lda">Latent Dirichlet allocation (LDA)</a></li>
<li><a href="#bisecting-k-means" id="markdown-toc-bisecting-k-means">Bisecting k-means</a></li>
<li><a href="#gaussian-mixture-model-gmm" id="markdown-toc-gaussian-mixture-model-gmm">Gaussian Mixture Model (GMM)</a> <ul>
<li><a href="#input-columns-1" id="markdown-toc-input-columns-1">Input Columns</a></li>
<li><a href="#output-columns-1" id="markdown-toc-output-columns-1">Output Columns</a></li>
</ul>
</li>
<li><a href="#power-iteration-clustering-pic" id="markdown-toc-power-iteration-clustering-pic">Power Iteration Clustering (PIC)</a></li>
</ul>
<h2 id="k-means">K-means</h2>
<p><a href="http://en.wikipedia.org/wiki/K-means_clustering">k-means</a> is one of the
most commonly used clustering algorithms that clusters the data points into a
predefined number of clusters. The MLlib implementation includes a parallelized
variant of the <a href="http://en.wikipedia.org/wiki/K-means%2B%2B">k-means++</a> method
called <a href="http://theory.stanford.edu/~sergei/papers/vldb12-kmpar.pdf">kmeans||</a>.</p>
<p><code class="language-plaintext highlighter-rouge">KMeans</code> is implemented as an <code class="language-plaintext highlighter-rouge">Estimator</code> and generates a <code class="language-plaintext highlighter-rouge">KMeansModel</code> as the base model.</p>
<h3 id="input-columns">Input Columns</h3>
<table class="table table-striped">
<thead>
<tr>
<th align="left">Param name</th>
<th align="left">Type(s)</th>
<th align="left">Default</th>
<th align="left">Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>featuresCol</td>
<td>Vector</td>
<td>"features"</td>
<td>Feature vector</td>
</tr>
</tbody>
</table>
<h3 id="output-columns">Output Columns</h3>
<table class="table table-striped">
<thead>
<tr>
<th align="left">Param name</th>
<th align="left">Type(s)</th>
<th align="left">Default</th>
<th align="left">Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>predictionCol</td>
<td>Int</td>
<td>"prediction"</td>
<td>Predicted cluster index</td>
</tr>
</tbody>
</table>
<p><strong>Examples</strong></p>
<div class="codetabs">
<div data-lang="python">
<p>Refer to the <a href="api/python/reference/api/pyspark.ml.clustering.KMeans.html">Python API docs</a> for more details.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">from</span> <span class="nn">pyspark.ml.clustering</span> <span class="kn">import</span> <span class="n">KMeans</span>
<span class="kn">from</span> <span class="nn">pyspark.ml.evaluation</span> <span class="kn">import</span> <span class="n">ClusteringEvaluator</span>
<span class="c1"># Loads data.
</span><span class="n">dataset</span> <span class="o">=</span> <span class="n">spark</span><span class="p">.</span><span class="n">read</span><span class="p">.</span><span class="nb">format</span><span class="p">(</span><span class="s">"libsvm"</span><span class="p">).</span><span class="n">load</span><span class="p">(</span><span class="s">"data/mllib/sample_kmeans_data.txt"</span><span class="p">)</span>
<span class="c1"># Trains a k-means model.
</span><span class="n">kmeans</span> <span class="o">=</span> <span class="n">KMeans</span><span class="p">().</span><span class="n">setK</span><span class="p">(</span><span class="mi">2</span><span class="p">).</span><span class="n">setSeed</span><span class="p">(</span><span class="mi">1</span><span class="p">)</span>
<span class="n">model</span> <span class="o">=</span> <span class="n">kmeans</span><span class="p">.</span><span class="n">fit</span><span class="p">(</span><span class="n">dataset</span><span class="p">)</span>
<span class="c1"># Make predictions
</span><span class="n">predictions</span> <span class="o">=</span> <span class="n">model</span><span class="p">.</span><span class="n">transform</span><span class="p">(</span><span class="n">dataset</span><span class="p">)</span>
<span class="c1"># Evaluate clustering by computing Silhouette score
</span><span class="n">evaluator</span> <span class="o">=</span> <span class="n">ClusteringEvaluator</span><span class="p">()</span>
<span class="n">silhouette</span> <span class="o">=</span> <span class="n">evaluator</span><span class="p">.</span><span class="n">evaluate</span><span class="p">(</span><span class="n">predictions</span><span class="p">)</span>
<span class="k">print</span><span class="p">(</span><span class="s">"Silhouette with squared euclidean distance = "</span> <span class="o">+</span> <span class="nb">str</span><span class="p">(</span><span class="n">silhouette</span><span class="p">))</span>
<span class="c1"># Shows the result.
</span><span class="n">centers</span> <span class="o">=</span> <span class="n">model</span><span class="p">.</span><span class="n">clusterCenters</span><span class="p">()</span>
<span class="k">print</span><span class="p">(</span><span class="s">"Cluster Centers: "</span><span class="p">)</span>
<span class="k">for</span> <span class="n">center</span> <span class="ow">in</span> <span class="n">centers</span><span class="p">:</span>
<span class="k">print</span><span class="p">(</span><span class="n">center</span><span class="p">)</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/python/ml/kmeans_example.py" in the Spark repo.</small></div>
</div>
<div data-lang="scala">
<p>Refer to the <a href="api/scala/org/apache/spark/ml/clustering/KMeans.html">Scala API docs</a> for more details.</p>
<div class="highlight"><pre class="codehilite"><code><span class="k">import</span> <span class="nn">org.apache.spark.ml.clustering.KMeans</span>
<span class="k">import</span> <span class="nn">org.apache.spark.ml.evaluation.ClusteringEvaluator</span>
<span class="c1">// Loads data.</span>
<span class="k">val</span> <span class="nv">dataset</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">read</span><span class="o">.</span><span class="py">format</span><span class="o">(</span><span class="s">"libsvm"</span><span class="o">).</span><span class="py">load</span><span class="o">(</span><span class="s">"data/mllib/sample_kmeans_data.txt"</span><span class="o">)</span>
<span class="c1">// Trains a k-means model.</span>
<span class="k">val</span> <span class="nv">kmeans</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">KMeans</span><span class="o">().</span><span class="py">setK</span><span class="o">(</span><span class="mi">2</span><span class="o">).</span><span class="py">setSeed</span><span class="o">(</span><span class="mi">1L</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">model</span> <span class="k">=</span> <span class="nv">kmeans</span><span class="o">.</span><span class="py">fit</span><span class="o">(</span><span class="n">dataset</span><span class="o">)</span>
<span class="c1">// Make predictions</span>
<span class="k">val</span> <span class="nv">predictions</span> <span class="k">=</span> <span class="nv">model</span><span class="o">.</span><span class="py">transform</span><span class="o">(</span><span class="n">dataset</span><span class="o">)</span>
<span class="c1">// Evaluate clustering by computing Silhouette score</span>
<span class="k">val</span> <span class="nv">evaluator</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">ClusteringEvaluator</span><span class="o">()</span>
<span class="k">val</span> <span class="nv">silhouette</span> <span class="k">=</span> <span class="nv">evaluator</span><span class="o">.</span><span class="py">evaluate</span><span class="o">(</span><span class="n">predictions</span><span class="o">)</span>
<span class="nf">println</span><span class="o">(</span><span class="n">s</span><span class="s">"Silhouette with squared euclidean distance = $silhouette"</span><span class="o">)</span>
<span class="c1">// Shows the result.</span>
<span class="nf">println</span><span class="o">(</span><span class="s">"Cluster Centers: "</span><span class="o">)</span>
<span class="nv">model</span><span class="o">.</span><span class="py">clusterCenters</span><span class="o">.</span><span class="py">foreach</span><span class="o">(</span><span class="n">println</span><span class="o">)</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/scala/org/apache/spark/examples/ml/KMeansExample.scala" in the Spark repo.</small></div>
</div>
<div data-lang="java">
<p>Refer to the <a href="api/java/org/apache/spark/ml/clustering/KMeans.html">Java API docs</a> for more details.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">import</span> <span class="nn">org.apache.spark.ml.clustering.KMeansModel</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.clustering.KMeans</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.evaluation.ClusteringEvaluator</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.linalg.Vector</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.Dataset</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.Row</span><span class="o">;</span>
<span class="c1">// Loads data.</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">dataset</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="na">read</span><span class="o">().</span><span class="na">format</span><span class="o">(</span><span class="s">"libsvm"</span><span class="o">).</span><span class="na">load</span><span class="o">(</span><span class="s">"data/mllib/sample_kmeans_data.txt"</span><span class="o">);</span>
<span class="c1">// Trains a k-means model.</span>
<span class="nc">KMeans</span> <span class="n">kmeans</span> <span class="o">=</span> <span class="k">new</span> <span class="nc">KMeans</span><span class="o">().</span><span class="na">setK</span><span class="o">(</span><span class="mi">2</span><span class="o">).</span><span class="na">setSeed</span><span class="o">(</span><span class="mi">1L</span><span class="o">);</span>
<span class="nc">KMeansModel</span> <span class="n">model</span> <span class="o">=</span> <span class="n">kmeans</span><span class="o">.</span><span class="na">fit</span><span class="o">(</span><span class="n">dataset</span><span class="o">);</span>
<span class="c1">// Make predictions</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">predictions</span> <span class="o">=</span> <span class="n">model</span><span class="o">.</span><span class="na">transform</span><span class="o">(</span><span class="n">dataset</span><span class="o">);</span>
<span class="c1">// Evaluate clustering by computing Silhouette score</span>
<span class="nc">ClusteringEvaluator</span> <span class="n">evaluator</span> <span class="o">=</span> <span class="k">new</span> <span class="nc">ClusteringEvaluator</span><span class="o">();</span>
<span class="kt">double</span> <span class="n">silhouette</span> <span class="o">=</span> <span class="n">evaluator</span><span class="o">.</span><span class="na">evaluate</span><span class="o">(</span><span class="n">predictions</span><span class="o">);</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="s">"Silhouette with squared euclidean distance = "</span> <span class="o">+</span> <span class="n">silhouette</span><span class="o">);</span>
<span class="c1">// Shows the result.</span>
<span class="nc">Vector</span><span class="o">[]</span> <span class="n">centers</span> <span class="o">=</span> <span class="n">model</span><span class="o">.</span><span class="na">clusterCenters</span><span class="o">();</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="s">"Cluster Centers: "</span><span class="o">);</span>
<span class="k">for</span> <span class="o">(</span><span class="nc">Vector</span> <span class="nl">center:</span> <span class="n">centers</span><span class="o">)</span> <span class="o">{</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="n">center</span><span class="o">);</span>
<span class="o">}</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/java/org/apache/spark/examples/ml/JavaKMeansExample.java" in the Spark repo.</small></div>
</div>
<div data-lang="r">
<p>Refer to the <a href="api/R/reference/spark.kmeans.html">R API docs</a> for more details.</p>
<div class="highlight"><pre class="codehilite"><code><span class="c1"># Fit a k-means model with spark.kmeans</span><span class="w">
</span><span class="n">t</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">as.data.frame</span><span class="p">(</span><span class="n">Titanic</span><span class="p">)</span><span class="w">
</span><span class="n">training</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">createDataFrame</span><span class="p">(</span><span class="n">t</span><span class="p">)</span><span class="w">
</span><span class="n">df_list</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">randomSplit</span><span class="p">(</span><span class="n">training</span><span class="p">,</span><span class="w"> </span><span class="nf">c</span><span class="p">(</span><span class="m">7</span><span class="p">,</span><span class="m">3</span><span class="p">),</span><span class="w"> </span><span class="m">2</span><span class="p">)</span><span class="w">
</span><span class="n">kmeansDF</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">df_list</span><span class="p">[[</span><span class="m">1</span><span class="p">]]</span><span class="w">
</span><span class="n">kmeansTestDF</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">df_list</span><span class="p">[[</span><span class="m">2</span><span class="p">]]</span><span class="w">
</span><span class="n">kmeansModel</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">spark.kmeans</span><span class="p">(</span><span class="n">kmeansDF</span><span class="p">,</span><span class="w"> </span><span class="o">~</span><span class="w"> </span><span class="n">Class</span><span class="w"> </span><span class="o">+</span><span class="w"> </span><span class="n">Sex</span><span class="w"> </span><span class="o">+</span><span class="w"> </span><span class="n">Age</span><span class="w"> </span><span class="o">+</span><span class="w"> </span><span class="n">Freq</span><span class="p">,</span><span class="w">
</span><span class="n">k</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="m">3</span><span class="p">)</span><span class="w">
</span><span class="c1"># Model summary</span><span class="w">
</span><span class="n">summary</span><span class="p">(</span><span class="n">kmeansModel</span><span class="p">)</span><span class="w">
</span><span class="c1"># Get fitted result from the k-means model</span><span class="w">
</span><span class="n">head</span><span class="p">(</span><span class="n">fitted</span><span class="p">(</span><span class="n">kmeansModel</span><span class="p">))</span><span class="w">
</span><span class="c1"># Prediction</span><span class="w">
</span><span class="n">kmeansPredictions</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">predict</span><span class="p">(</span><span class="n">kmeansModel</span><span class="p">,</span><span class="w"> </span><span class="n">kmeansTestDF</span><span class="p">)</span><span class="w">
</span><span class="n">head</span><span class="p">(</span><span class="n">kmeansPredictions</span><span class="p">)</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/r/ml/kmeans.R" in the Spark repo.</small></div>
</div>
</div>
<h2 id="latent-dirichlet-allocation-lda">Latent Dirichlet allocation (LDA)</h2>
<p><code class="language-plaintext highlighter-rouge">LDA</code> is implemented as an <code class="language-plaintext highlighter-rouge">Estimator</code> that supports both <code class="language-plaintext highlighter-rouge">EMLDAOptimizer</code> and <code class="language-plaintext highlighter-rouge">OnlineLDAOptimizer</code>,
and generates an <code class="language-plaintext highlighter-rouge">LDAModel</code> as the base model. Expert users may cast an <code class="language-plaintext highlighter-rouge">LDAModel</code> generated by
<code class="language-plaintext highlighter-rouge">EMLDAOptimizer</code> to a <code class="language-plaintext highlighter-rouge">DistributedLDAModel</code> if needed.</p>
<p><strong>Examples</strong></p>
<div class="codetabs">
<div data-lang="python">
<p>Refer to the <a href="api/python/reference/api/pyspark.ml.clustering.LDA.html">Python API docs</a> for more details.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">from</span> <span class="nn">pyspark.ml.clustering</span> <span class="kn">import</span> <span class="n">LDA</span>
<span class="c1"># Loads data.
</span><span class="n">dataset</span> <span class="o">=</span> <span class="n">spark</span><span class="p">.</span><span class="n">read</span><span class="p">.</span><span class="nb">format</span><span class="p">(</span><span class="s">"libsvm"</span><span class="p">).</span><span class="n">load</span><span class="p">(</span><span class="s">"data/mllib/sample_lda_libsvm_data.txt"</span><span class="p">)</span>
<span class="c1"># Trains a LDA model.
</span><span class="n">lda</span> <span class="o">=</span> <span class="n">LDA</span><span class="p">(</span><span class="n">k</span><span class="o">=</span><span class="mi">10</span><span class="p">,</span> <span class="n">maxIter</span><span class="o">=</span><span class="mi">10</span><span class="p">)</span>
<span class="n">model</span> <span class="o">=</span> <span class="n">lda</span><span class="p">.</span><span class="n">fit</span><span class="p">(</span><span class="n">dataset</span><span class="p">)</span>
<span class="n">ll</span> <span class="o">=</span> <span class="n">model</span><span class="p">.</span><span class="n">logLikelihood</span><span class="p">(</span><span class="n">dataset</span><span class="p">)</span>
<span class="n">lp</span> <span class="o">=</span> <span class="n">model</span><span class="p">.</span><span class="n">logPerplexity</span><span class="p">(</span><span class="n">dataset</span><span class="p">)</span>
<span class="k">print</span><span class="p">(</span><span class="s">"The lower bound on the log likelihood of the entire corpus: "</span> <span class="o">+</span> <span class="nb">str</span><span class="p">(</span><span class="n">ll</span><span class="p">))</span>
<span class="k">print</span><span class="p">(</span><span class="s">"The upper bound on perplexity: "</span> <span class="o">+</span> <span class="nb">str</span><span class="p">(</span><span class="n">lp</span><span class="p">))</span>
<span class="c1"># Describe topics.
</span><span class="n">topics</span> <span class="o">=</span> <span class="n">model</span><span class="p">.</span><span class="n">describeTopics</span><span class="p">(</span><span class="mi">3</span><span class="p">)</span>
<span class="k">print</span><span class="p">(</span><span class="s">"The topics described by their top-weighted terms:"</span><span class="p">)</span>
<span class="n">topics</span><span class="p">.</span><span class="n">show</span><span class="p">(</span><span class="n">truncate</span><span class="o">=</span><span class="bp">False</span><span class="p">)</span>
<span class="c1"># Shows the result
</span><span class="n">transformed</span> <span class="o">=</span> <span class="n">model</span><span class="p">.</span><span class="n">transform</span><span class="p">(</span><span class="n">dataset</span><span class="p">)</span>
<span class="n">transformed</span><span class="p">.</span><span class="n">show</span><span class="p">(</span><span class="n">truncate</span><span class="o">=</span><span class="bp">False</span><span class="p">)</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/python/ml/lda_example.py" in the Spark repo.</small></div>
</div>
<div data-lang="scala">
<p>Refer to the <a href="api/scala/org/apache/spark/ml/clustering/LDA.html">Scala API docs</a> for more details.</p>
<div class="highlight"><pre class="codehilite"><code><span class="k">import</span> <span class="nn">org.apache.spark.ml.clustering.LDA</span>
<span class="c1">// Loads data.</span>
<span class="k">val</span> <span class="nv">dataset</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">read</span><span class="o">.</span><span class="py">format</span><span class="o">(</span><span class="s">"libsvm"</span><span class="o">)</span>
<span class="o">.</span><span class="py">load</span><span class="o">(</span><span class="s">"data/mllib/sample_lda_libsvm_data.txt"</span><span class="o">)</span>
<span class="c1">// Trains a LDA model.</span>
<span class="k">val</span> <span class="nv">lda</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">LDA</span><span class="o">().</span><span class="py">setK</span><span class="o">(</span><span class="mi">10</span><span class="o">).</span><span class="py">setMaxIter</span><span class="o">(</span><span class="mi">10</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">model</span> <span class="k">=</span> <span class="nv">lda</span><span class="o">.</span><span class="py">fit</span><span class="o">(</span><span class="n">dataset</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">ll</span> <span class="k">=</span> <span class="nv">model</span><span class="o">.</span><span class="py">logLikelihood</span><span class="o">(</span><span class="n">dataset</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">lp</span> <span class="k">=</span> <span class="nv">model</span><span class="o">.</span><span class="py">logPerplexity</span><span class="o">(</span><span class="n">dataset</span><span class="o">)</span>
<span class="nf">println</span><span class="o">(</span><span class="n">s</span><span class="s">"The lower bound on the log likelihood of the entire corpus: $ll"</span><span class="o">)</span>
<span class="nf">println</span><span class="o">(</span><span class="n">s</span><span class="s">"The upper bound on perplexity: $lp"</span><span class="o">)</span>
<span class="c1">// Describe topics.</span>
<span class="k">val</span> <span class="nv">topics</span> <span class="k">=</span> <span class="nv">model</span><span class="o">.</span><span class="py">describeTopics</span><span class="o">(</span><span class="mi">3</span><span class="o">)</span>
<span class="nf">println</span><span class="o">(</span><span class="s">"The topics described by their top-weighted terms:"</span><span class="o">)</span>
<span class="nv">topics</span><span class="o">.</span><span class="py">show</span><span class="o">(</span><span class="kc">false</span><span class="o">)</span>
<span class="c1">// Shows the result.</span>
<span class="k">val</span> <span class="nv">transformed</span> <span class="k">=</span> <span class="nv">model</span><span class="o">.</span><span class="py">transform</span><span class="o">(</span><span class="n">dataset</span><span class="o">)</span>
<span class="nv">transformed</span><span class="o">.</span><span class="py">show</span><span class="o">(</span><span class="kc">false</span><span class="o">)</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/scala/org/apache/spark/examples/ml/LDAExample.scala" in the Spark repo.</small></div>
</div>
<div data-lang="java">
<p>Refer to the <a href="api/java/org/apache/spark/ml/clustering/LDA.html">Java API docs</a> for more details.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">import</span> <span class="nn">org.apache.spark.ml.clustering.LDA</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.clustering.LDAModel</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.Dataset</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.Row</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.SparkSession</span><span class="o">;</span>
<span class="c1">// Loads data.</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">dataset</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="na">read</span><span class="o">().</span><span class="na">format</span><span class="o">(</span><span class="s">"libsvm"</span><span class="o">)</span>
<span class="o">.</span><span class="na">load</span><span class="o">(</span><span class="s">"data/mllib/sample_lda_libsvm_data.txt"</span><span class="o">);</span>
<span class="c1">// Trains a LDA model.</span>
<span class="no">LDA</span> <span class="n">lda</span> <span class="o">=</span> <span class="k">new</span> <span class="no">LDA</span><span class="o">().</span><span class="na">setK</span><span class="o">(</span><span class="mi">10</span><span class="o">).</span><span class="na">setMaxIter</span><span class="o">(</span><span class="mi">10</span><span class="o">);</span>
<span class="nc">LDAModel</span> <span class="n">model</span> <span class="o">=</span> <span class="n">lda</span><span class="o">.</span><span class="na">fit</span><span class="o">(</span><span class="n">dataset</span><span class="o">);</span>
<span class="kt">double</span> <span class="n">ll</span> <span class="o">=</span> <span class="n">model</span><span class="o">.</span><span class="na">logLikelihood</span><span class="o">(</span><span class="n">dataset</span><span class="o">);</span>
<span class="kt">double</span> <span class="n">lp</span> <span class="o">=</span> <span class="n">model</span><span class="o">.</span><span class="na">logPerplexity</span><span class="o">(</span><span class="n">dataset</span><span class="o">);</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="s">"The lower bound on the log likelihood of the entire corpus: "</span> <span class="o">+</span> <span class="n">ll</span><span class="o">);</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="s">"The upper bound on perplexity: "</span> <span class="o">+</span> <span class="n">lp</span><span class="o">);</span>
<span class="c1">// Describe topics.</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">topics</span> <span class="o">=</span> <span class="n">model</span><span class="o">.</span><span class="na">describeTopics</span><span class="o">(</span><span class="mi">3</span><span class="o">);</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="s">"The topics described by their top-weighted terms:"</span><span class="o">);</span>
<span class="n">topics</span><span class="o">.</span><span class="na">show</span><span class="o">(</span><span class="kc">false</span><span class="o">);</span>
<span class="c1">// Shows the result.</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">transformed</span> <span class="o">=</span> <span class="n">model</span><span class="o">.</span><span class="na">transform</span><span class="o">(</span><span class="n">dataset</span><span class="o">);</span>
<span class="n">transformed</span><span class="o">.</span><span class="na">show</span><span class="o">(</span><span class="kc">false</span><span class="o">);</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/java/org/apache/spark/examples/ml/JavaLDAExample.java" in the Spark repo.</small></div>
</div>
<div data-lang="r">
<p>Refer to the <a href="api/R/reference/spark.lda.html">R API docs</a> for more details.</p>
<div class="highlight"><pre class="codehilite"><code><span class="c1"># Load training data</span><span class="w">
</span><span class="n">df</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">read.df</span><span class="p">(</span><span class="s2">"data/mllib/sample_lda_libsvm_data.txt"</span><span class="p">,</span><span class="w"> </span><span class="n">source</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="s2">"libsvm"</span><span class="p">)</span><span class="w">
</span><span class="n">training</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">df</span><span class="w">
</span><span class="n">test</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">df</span><span class="w">
</span><span class="c1"># Fit a latent dirichlet allocation model with spark.lda</span><span class="w">
</span><span class="n">model</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">spark.lda</span><span class="p">(</span><span class="n">training</span><span class="p">,</span><span class="w"> </span><span class="n">k</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="m">10</span><span class="p">,</span><span class="w"> </span><span class="n">maxIter</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="m">10</span><span class="p">)</span><span class="w">
</span><span class="c1"># Model summary</span><span class="w">
</span><span class="n">summary</span><span class="p">(</span><span class="n">model</span><span class="p">)</span><span class="w">
</span><span class="c1"># Posterior probabilities</span><span class="w">
</span><span class="n">posterior</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">spark.posterior</span><span class="p">(</span><span class="n">model</span><span class="p">,</span><span class="w"> </span><span class="n">test</span><span class="p">)</span><span class="w">
</span><span class="n">head</span><span class="p">(</span><span class="n">posterior</span><span class="p">)</span><span class="w">
</span><span class="c1"># The log perplexity of the LDA model</span><span class="w">
</span><span class="n">logPerplexity</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">spark.perplexity</span><span class="p">(</span><span class="n">model</span><span class="p">,</span><span class="w"> </span><span class="n">test</span><span class="p">)</span><span class="w">
</span><span class="n">print</span><span class="p">(</span><span class="n">paste0</span><span class="p">(</span><span class="s2">"The upper bound bound on perplexity: "</span><span class="p">,</span><span class="w"> </span><span class="n">logPerplexity</span><span class="p">))</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/r/ml/lda.R" in the Spark repo.</small></div>
</div>
</div>
<h2 id="bisecting-k-means">Bisecting k-means</h2>
<p>Bisecting k-means is a kind of <a href="https://en.wikipedia.org/wiki/Hierarchical_clustering">hierarchical clustering</a> using a
divisive (or &#8220;top-down&#8221;) approach: all observations start in one cluster, and splits are performed recursively as one
moves down the hierarchy.</p>
<p>Bisecting k-means can often be much faster than regular k-means, but it will generally produce a different clustering.</p>
<p><code class="language-plaintext highlighter-rouge">BisectingKMeans</code> is implemented as an <code class="language-plaintext highlighter-rouge">Estimator</code> and generates a <code class="language-plaintext highlighter-rouge">BisectingKMeansModel</code> as the base model.</p>
<p><strong>Examples</strong></p>
<div class="codetabs">
<div data-lang="python">
<p>Refer to the <a href="api/python/reference/api/pyspark.ml.clustering.BisectingKMeans.html">Python API docs</a> for more details.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">from</span> <span class="nn">pyspark.ml.clustering</span> <span class="kn">import</span> <span class="n">BisectingKMeans</span>
<span class="kn">from</span> <span class="nn">pyspark.ml.evaluation</span> <span class="kn">import</span> <span class="n">ClusteringEvaluator</span>
<span class="c1"># Loads data.
</span><span class="n">dataset</span> <span class="o">=</span> <span class="n">spark</span><span class="p">.</span><span class="n">read</span><span class="p">.</span><span class="nb">format</span><span class="p">(</span><span class="s">"libsvm"</span><span class="p">).</span><span class="n">load</span><span class="p">(</span><span class="s">"data/mllib/sample_kmeans_data.txt"</span><span class="p">)</span>
<span class="c1"># Trains a bisecting k-means model.
</span><span class="n">bkm</span> <span class="o">=</span> <span class="n">BisectingKMeans</span><span class="p">().</span><span class="n">setK</span><span class="p">(</span><span class="mi">2</span><span class="p">).</span><span class="n">setSeed</span><span class="p">(</span><span class="mi">1</span><span class="p">)</span>
<span class="n">model</span> <span class="o">=</span> <span class="n">bkm</span><span class="p">.</span><span class="n">fit</span><span class="p">(</span><span class="n">dataset</span><span class="p">)</span>
<span class="c1"># Make predictions
</span><span class="n">predictions</span> <span class="o">=</span> <span class="n">model</span><span class="p">.</span><span class="n">transform</span><span class="p">(</span><span class="n">dataset</span><span class="p">)</span>
<span class="c1"># Evaluate clustering by computing Silhouette score
</span><span class="n">evaluator</span> <span class="o">=</span> <span class="n">ClusteringEvaluator</span><span class="p">()</span>
<span class="n">silhouette</span> <span class="o">=</span> <span class="n">evaluator</span><span class="p">.</span><span class="n">evaluate</span><span class="p">(</span><span class="n">predictions</span><span class="p">)</span>
<span class="k">print</span><span class="p">(</span><span class="s">"Silhouette with squared euclidean distance = "</span> <span class="o">+</span> <span class="nb">str</span><span class="p">(</span><span class="n">silhouette</span><span class="p">))</span>
<span class="c1"># Shows the result.
</span><span class="k">print</span><span class="p">(</span><span class="s">"Cluster Centers: "</span><span class="p">)</span>
<span class="n">centers</span> <span class="o">=</span> <span class="n">model</span><span class="p">.</span><span class="n">clusterCenters</span><span class="p">()</span>
<span class="k">for</span> <span class="n">center</span> <span class="ow">in</span> <span class="n">centers</span><span class="p">:</span>
<span class="k">print</span><span class="p">(</span><span class="n">center</span><span class="p">)</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/python/ml/bisecting_k_means_example.py" in the Spark repo.</small></div>
</div>
<div data-lang="scala">
<p>Refer to the <a href="api/scala/org/apache/spark/ml/clustering/BisectingKMeans.html">Scala API docs</a> for more details.</p>
<div class="highlight"><pre class="codehilite"><code><span class="k">import</span> <span class="nn">org.apache.spark.ml.clustering.BisectingKMeans</span>
<span class="k">import</span> <span class="nn">org.apache.spark.ml.evaluation.ClusteringEvaluator</span>
<span class="c1">// Loads data.</span>
<span class="k">val</span> <span class="nv">dataset</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">read</span><span class="o">.</span><span class="py">format</span><span class="o">(</span><span class="s">"libsvm"</span><span class="o">).</span><span class="py">load</span><span class="o">(</span><span class="s">"data/mllib/sample_kmeans_data.txt"</span><span class="o">)</span>
<span class="c1">// Trains a bisecting k-means model.</span>
<span class="k">val</span> <span class="nv">bkm</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">BisectingKMeans</span><span class="o">().</span><span class="py">setK</span><span class="o">(</span><span class="mi">2</span><span class="o">).</span><span class="py">setSeed</span><span class="o">(</span><span class="mi">1</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">model</span> <span class="k">=</span> <span class="nv">bkm</span><span class="o">.</span><span class="py">fit</span><span class="o">(</span><span class="n">dataset</span><span class="o">)</span>
<span class="c1">// Make predictions</span>
<span class="k">val</span> <span class="nv">predictions</span> <span class="k">=</span> <span class="nv">model</span><span class="o">.</span><span class="py">transform</span><span class="o">(</span><span class="n">dataset</span><span class="o">)</span>
<span class="c1">// Evaluate clustering by computing Silhouette score</span>
<span class="k">val</span> <span class="nv">evaluator</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">ClusteringEvaluator</span><span class="o">()</span>
<span class="k">val</span> <span class="nv">silhouette</span> <span class="k">=</span> <span class="nv">evaluator</span><span class="o">.</span><span class="py">evaluate</span><span class="o">(</span><span class="n">predictions</span><span class="o">)</span>
<span class="nf">println</span><span class="o">(</span><span class="n">s</span><span class="s">"Silhouette with squared euclidean distance = $silhouette"</span><span class="o">)</span>
<span class="c1">// Shows the result.</span>
<span class="nf">println</span><span class="o">(</span><span class="s">"Cluster Centers: "</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">centers</span> <span class="k">=</span> <span class="nv">model</span><span class="o">.</span><span class="py">clusterCenters</span>
<span class="nv">centers</span><span class="o">.</span><span class="py">foreach</span><span class="o">(</span><span class="n">println</span><span class="o">)</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/scala/org/apache/spark/examples/ml/BisectingKMeansExample.scala" in the Spark repo.</small></div>
</div>
<div data-lang="java">
<p>Refer to the <a href="api/java/org/apache/spark/ml/clustering/BisectingKMeans.html">Java API docs</a> for more details.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">import</span> <span class="nn">org.apache.spark.ml.clustering.BisectingKMeans</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.clustering.BisectingKMeansModel</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.evaluation.ClusteringEvaluator</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.linalg.Vector</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.Dataset</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.Row</span><span class="o">;</span>
<span class="c1">// Loads data.</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">dataset</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="na">read</span><span class="o">().</span><span class="na">format</span><span class="o">(</span><span class="s">"libsvm"</span><span class="o">).</span><span class="na">load</span><span class="o">(</span><span class="s">"data/mllib/sample_kmeans_data.txt"</span><span class="o">);</span>
<span class="c1">// Trains a bisecting k-means model.</span>
<span class="nc">BisectingKMeans</span> <span class="n">bkm</span> <span class="o">=</span> <span class="k">new</span> <span class="nc">BisectingKMeans</span><span class="o">().</span><span class="na">setK</span><span class="o">(</span><span class="mi">2</span><span class="o">).</span><span class="na">setSeed</span><span class="o">(</span><span class="mi">1</span><span class="o">);</span>
<span class="nc">BisectingKMeansModel</span> <span class="n">model</span> <span class="o">=</span> <span class="n">bkm</span><span class="o">.</span><span class="na">fit</span><span class="o">(</span><span class="n">dataset</span><span class="o">);</span>
<span class="c1">// Make predictions</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">predictions</span> <span class="o">=</span> <span class="n">model</span><span class="o">.</span><span class="na">transform</span><span class="o">(</span><span class="n">dataset</span><span class="o">);</span>
<span class="c1">// Evaluate clustering by computing Silhouette score</span>
<span class="nc">ClusteringEvaluator</span> <span class="n">evaluator</span> <span class="o">=</span> <span class="k">new</span> <span class="nc">ClusteringEvaluator</span><span class="o">();</span>
<span class="kt">double</span> <span class="n">silhouette</span> <span class="o">=</span> <span class="n">evaluator</span><span class="o">.</span><span class="na">evaluate</span><span class="o">(</span><span class="n">predictions</span><span class="o">);</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="s">"Silhouette with squared euclidean distance = "</span> <span class="o">+</span> <span class="n">silhouette</span><span class="o">);</span>
<span class="c1">// Shows the result.</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="s">"Cluster Centers: "</span><span class="o">);</span>
<span class="nc">Vector</span><span class="o">[]</span> <span class="n">centers</span> <span class="o">=</span> <span class="n">model</span><span class="o">.</span><span class="na">clusterCenters</span><span class="o">();</span>
<span class="k">for</span> <span class="o">(</span><span class="nc">Vector</span> <span class="n">center</span> <span class="o">:</span> <span class="n">centers</span><span class="o">)</span> <span class="o">{</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="n">center</span><span class="o">);</span>
<span class="o">}</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/java/org/apache/spark/examples/ml/JavaBisectingKMeansExample.java" in the Spark repo.</small></div>
</div>
<div data-lang="r">
<p>Refer to the <a href="api/R/reference/spark.bisectingKmeans.html">R API docs</a> for more details.</p>
<div class="highlight"><pre class="codehilite"><code><span class="n">t</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">as.data.frame</span><span class="p">(</span><span class="n">Titanic</span><span class="p">)</span><span class="w">
</span><span class="n">training</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">createDataFrame</span><span class="p">(</span><span class="n">t</span><span class="p">)</span><span class="w">
</span><span class="c1"># Fit bisecting k-means model with four centers</span><span class="w">
</span><span class="n">model</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">spark.bisectingKmeans</span><span class="p">(</span><span class="n">training</span><span class="p">,</span><span class="w"> </span><span class="n">Class</span><span class="w"> </span><span class="o">~</span><span class="w"> </span><span class="n">Survived</span><span class="p">,</span><span class="w"> </span><span class="n">k</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="m">4</span><span class="p">)</span><span class="w">
</span><span class="c1"># get fitted result from a bisecting k-means model</span><span class="w">
</span><span class="n">fitted.model</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">fitted</span><span class="p">(</span><span class="n">model</span><span class="p">,</span><span class="w"> </span><span class="s2">"centers"</span><span class="p">)</span><span class="w">
</span><span class="c1"># Model summary</span><span class="w">
</span><span class="n">head</span><span class="p">(</span><span class="n">summary</span><span class="p">(</span><span class="n">fitted.model</span><span class="p">))</span><span class="w">
</span><span class="c1"># fitted values on training data</span><span class="w">
</span><span class="n">fitted</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">predict</span><span class="p">(</span><span class="n">model</span><span class="p">,</span><span class="w"> </span><span class="n">training</span><span class="p">)</span><span class="w">
</span><span class="n">head</span><span class="p">(</span><span class="n">select</span><span class="p">(</span><span class="n">fitted</span><span class="p">,</span><span class="w"> </span><span class="s2">"Class"</span><span class="p">,</span><span class="w"> </span><span class="s2">"prediction"</span><span class="p">))</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/r/ml/bisectingKmeans.R" in the Spark repo.</small></div>
</div>
</div>
<h2 id="gaussian-mixture-model-gmm">Gaussian Mixture Model (GMM)</h2>
<p>A <a href="http://en.wikipedia.org/wiki/Mixture_model#Multivariate_Gaussian_mixture_model">Gaussian Mixture Model</a>
represents a composite distribution whereby points are drawn from one of <em>k</em> Gaussian sub-distributions,
each with its own probability. The <code class="language-plaintext highlighter-rouge">spark.ml</code> implementation uses the
<a href="http://en.wikipedia.org/wiki/Expectation%E2%80%93maximization_algorithm">expectation-maximization</a>
algorithm to induce the maximum-likelihood model given a set of samples.</p>
<p><code class="language-plaintext highlighter-rouge">GaussianMixture</code> is implemented as an <code class="language-plaintext highlighter-rouge">Estimator</code> and generates a <code class="language-plaintext highlighter-rouge">GaussianMixtureModel</code> as the base
model.</p>
<h3 id="input-columns-1">Input Columns</h3>
<table class="table table-striped">
<thead>
<tr>
<th align="left">Param name</th>
<th align="left">Type(s)</th>
<th align="left">Default</th>
<th align="left">Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>featuresCol</td>
<td>Vector</td>
<td>"features"</td>
<td>Feature vector</td>
</tr>
</tbody>
</table>
<h3 id="output-columns-1">Output Columns</h3>
<table class="table table-striped">
<thead>
<tr>
<th align="left">Param name</th>
<th align="left">Type(s)</th>
<th align="left">Default</th>
<th align="left">Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>predictionCol</td>
<td>Int</td>
<td>"prediction"</td>
<td>Predicted cluster index</td>
</tr>
<tr>
<td>probabilityCol</td>
<td>Vector</td>
<td>"probability"</td>
<td>Probability of each cluster</td>
</tr>
</tbody>
</table>
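<p>As a minimal illustration of the output columns above, the following PySpark sketch (assuming an active <code class="language-plaintext highlighter-rouge">SparkSession</code> named <code class="language-plaintext highlighter-rouge">spark</code> and the sample data set used in the examples below) fits a model and inspects the <code class="language-plaintext highlighter-rouge">prediction</code> and <code class="language-plaintext highlighter-rouge">probability</code> columns of the transformed data:</p>
<div class="highlight"><pre class="codehilite"><code>from pyspark.ml.clustering import GaussianMixture

# Load the sample data shipped with Spark (the same file as in the examples below)
dataset = spark.read.format("libsvm").load("data/mllib/sample_kmeans_data.txt")

# Fit a two-component mixture; the seed is fixed only to make the sketch reproducible
model = GaussianMixture(k=2, seed=1).fit(dataset)

# The transformed DataFrame carries the configured output columns:
# "prediction" holds the cluster index, "probability" the per-component membership probabilities
model.transform(dataset) \
    .select("features", "prediction", "probability") \
    .show(truncate=False)</code></pre></div>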
<p><strong>Examples</strong></p>
<div class="codetabs">
<div data-lang="python">
<p>Refer to the <a href="api/python/reference/api/pyspark.ml.clustering.GaussianMixture.html">Python API docs</a> for more details.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">from</span> <span class="nn">pyspark.ml.clustering</span> <span class="kn">import</span> <span class="n">GaussianMixture</span>
<span class="c1"># loads data
</span><span class="n">dataset</span> <span class="o">=</span> <span class="n">spark</span><span class="p">.</span><span class="n">read</span><span class="p">.</span><span class="nb">format</span><span class="p">(</span><span class="s">"libsvm"</span><span class="p">).</span><span class="n">load</span><span class="p">(</span><span class="s">"data/mllib/sample_kmeans_data.txt"</span><span class="p">)</span>
<span class="n">gmm</span> <span class="o">=</span> <span class="n">GaussianMixture</span><span class="p">().</span><span class="n">setK</span><span class="p">(</span><span class="mi">2</span><span class="p">).</span><span class="n">setSeed</span><span class="p">(</span><span class="mi">538009335</span><span class="p">)</span>
<span class="n">model</span> <span class="o">=</span> <span class="n">gmm</span><span class="p">.</span><span class="n">fit</span><span class="p">(</span><span class="n">dataset</span><span class="p">)</span>
<span class="k">print</span><span class="p">(</span><span class="s">"Gaussians shown as a DataFrame: "</span><span class="p">)</span>
<span class="n">model</span><span class="p">.</span><span class="n">gaussiansDF</span><span class="p">.</span><span class="n">show</span><span class="p">(</span><span class="n">truncate</span><span class="o">=</span><span class="bp">False</span><span class="p">)</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/python/ml/gaussian_mixture_example.py" in the Spark repo.</small></div>
</div>
<div data-lang="scala">
<p>Refer to the <a href="api/scala/org/apache/spark/ml/clustering/GaussianMixture.html">Scala API docs</a> for more details.</p>
<div class="highlight"><pre class="codehilite"><code><span class="k">import</span> <span class="nn">org.apache.spark.ml.clustering.GaussianMixture</span>
<span class="c1">// Loads data</span>
<span class="k">val</span> <span class="nv">dataset</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">read</span><span class="o">.</span><span class="py">format</span><span class="o">(</span><span class="s">"libsvm"</span><span class="o">).</span><span class="py">load</span><span class="o">(</span><span class="s">"data/mllib/sample_kmeans_data.txt"</span><span class="o">)</span>
<span class="c1">// Trains Gaussian Mixture Model</span>
<span class="k">val</span> <span class="nv">gmm</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">GaussianMixture</span><span class="o">()</span>
<span class="o">.</span><span class="py">setK</span><span class="o">(</span><span class="mi">2</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">model</span> <span class="k">=</span> <span class="nv">gmm</span><span class="o">.</span><span class="py">fit</span><span class="o">(</span><span class="n">dataset</span><span class="o">)</span>
<span class="c1">// output parameters of mixture model model</span>
<span class="nf">for</span> <span class="o">(</span><span class="n">i</span> <span class="k">&lt;-</span> <span class="mi">0</span> <span class="n">until</span> <span class="nv">model</span><span class="o">.</span><span class="py">getK</span><span class="o">)</span> <span class="o">{</span>
<span class="nf">println</span><span class="o">(</span><span class="n">s</span><span class="s">"Gaussian $i:\nweight=${model.weights(i)}\n"</span> <span class="o">+</span>
<span class="n">s</span><span class="s">"mu=${model.gaussians(i).mean}\nsigma=\n${model.gaussians(i).cov}\n"</span><span class="o">)</span>
<span class="o">}</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/scala/org/apache/spark/examples/ml/GaussianMixtureExample.scala" in the Spark repo.</small></div>
</div>
<div data-lang="java">
<p>Refer to the <a href="api/java/org/apache/spark/ml/clustering/GaussianMixture.html">Java API docs</a> for more details.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">import</span> <span class="nn">org.apache.spark.ml.clustering.GaussianMixture</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.clustering.GaussianMixtureModel</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.Dataset</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.Row</span><span class="o">;</span>
<span class="c1">// Loads data</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">dataset</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="na">read</span><span class="o">().</span><span class="na">format</span><span class="o">(</span><span class="s">"libsvm"</span><span class="o">).</span><span class="na">load</span><span class="o">(</span><span class="s">"data/mllib/sample_kmeans_data.txt"</span><span class="o">);</span>
<span class="c1">// Trains a GaussianMixture model</span>
<span class="nc">GaussianMixture</span> <span class="n">gmm</span> <span class="o">=</span> <span class="k">new</span> <span class="nc">GaussianMixture</span><span class="o">()</span>
<span class="o">.</span><span class="na">setK</span><span class="o">(</span><span class="mi">2</span><span class="o">);</span>
<span class="nc">GaussianMixtureModel</span> <span class="n">model</span> <span class="o">=</span> <span class="n">gmm</span><span class="o">.</span><span class="na">fit</span><span class="o">(</span><span class="n">dataset</span><span class="o">);</span>
<span class="c1">// Output the parameters of the mixture model</span>
<span class="k">for</span> <span class="o">(</span><span class="kt">int</span> <span class="n">i</span> <span class="o">=</span> <span class="mi">0</span><span class="o">;</span> <span class="n">i</span> <span class="o">&lt;</span> <span class="n">model</span><span class="o">.</span><span class="na">getK</span><span class="o">();</span> <span class="n">i</span><span class="o">++)</span> <span class="o">{</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">printf</span><span class="o">(</span><span class="s">"Gaussian %d:\nweight=%f\nmu=%s\nsigma=\n%s\n\n"</span><span class="o">,</span>
<span class="n">i</span><span class="o">,</span> <span class="n">model</span><span class="o">.</span><span class="na">weights</span><span class="o">()[</span><span class="n">i</span><span class="o">],</span> <span class="n">model</span><span class="o">.</span><span class="na">gaussians</span><span class="o">()[</span><span class="n">i</span><span class="o">].</span><span class="na">mean</span><span class="o">(),</span> <span class="n">model</span><span class="o">.</span><span class="na">gaussians</span><span class="o">()[</span><span class="n">i</span><span class="o">].</span><span class="na">cov</span><span class="o">());</span>
<span class="o">}</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/java/org/apache/spark/examples/ml/JavaGaussianMixtureExample.java" in the Spark repo.</small></div>
</div>
<div data-lang="r">
<p>Refer to the <a href="api/R/reference/spark.gaussianMixture.html">R API docs</a> for more details.</p>
<div class="highlight"><pre class="codehilite"><code><span class="c1"># Load training data</span><span class="w">
</span><span class="n">df</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">read.df</span><span class="p">(</span><span class="s2">"data/mllib/sample_kmeans_data.txt"</span><span class="p">,</span><span class="w"> </span><span class="n">source</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="s2">"libsvm"</span><span class="p">)</span><span class="w">
</span><span class="n">training</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">df</span><span class="w">
</span><span class="n">test</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">df</span><span class="w">
</span><span class="c1"># Fit a gaussian mixture clustering model with spark.gaussianMixture</span><span class="w">
</span><span class="n">model</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">spark.gaussianMixture</span><span class="p">(</span><span class="n">training</span><span class="p">,</span><span class="w"> </span><span class="o">~</span><span class="w"> </span><span class="n">features</span><span class="p">,</span><span class="w"> </span><span class="n">k</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="m">2</span><span class="p">)</span><span class="w">
</span><span class="c1"># Model summary</span><span class="w">
</span><span class="n">summary</span><span class="p">(</span><span class="n">model</span><span class="p">)</span><span class="w">
</span><span class="c1"># Prediction</span><span class="w">
</span><span class="n">predictions</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">predict</span><span class="p">(</span><span class="n">model</span><span class="p">,</span><span class="w"> </span><span class="n">test</span><span class="p">)</span><span class="w">
</span><span class="n">head</span><span class="p">(</span><span class="n">predictions</span><span class="p">)</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/r/ml/gaussianMixture.R" in the Spark repo.</small></div>
</div>
</div>
<h2 id="power-iteration-clustering-pic">Power Iteration Clustering (PIC)</h2>
<p>Power Iteration Clustering (PIC) is a scalable graph clustering algorithm
developed by <a href="http://www.cs.cmu.edu/~frank/papers/icml2010-pic-final.pdf">Lin and Cohen</a>.
From the abstract: PIC finds a very low-dimensional embedding of a dataset
using truncated power iteration on a normalized pair-wise similarity matrix of the data.</p>
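<p>In outline, and following the notation of the paper, PIC forms the row-normalized affinity matrix $W = D^{-1}A$, where $A$ is the symmetric affinity (similarity) matrix and $D$ is the diagonal matrix of its row sums, and repeatedly applies</p>
<p>$$ \mathbf{v}^{(t+1)} = \frac{W\,\mathbf{v}^{(t)}}{\lVert W\,\mathbf{v}^{(t)} \rVert_1}, $$</p>
<p>stopping well before convergence so that the entries of $\mathbf{v}$ still separate the clusters; the resulting one-dimensional embedding is then clustered with k-means.</p>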
<p><code class="language-plaintext highlighter-rouge">spark.ml</code>&#8217;s PowerIterationClustering implementation takes the following parameters:</p>
<ul>
<li><code class="language-plaintext highlighter-rouge">k</code>: the number of clusters to create</li>
<li><code class="language-plaintext highlighter-rouge">initMode</code>: param for the initialization algorithm</li>
<li><code class="language-plaintext highlighter-rouge">maxIter</code>: param for maximum number of iterations</li>
<li><code class="language-plaintext highlighter-rouge">srcCol</code>: param for the name of the input column for source vertex IDs</li>
<li><code class="language-plaintext highlighter-rouge">dstCol</code>: name of the input column for destination vertex IDs</li>
<li><code class="language-plaintext highlighter-rouge">weightCol</code>: Param for weight column name</li>
</ul>
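<p>The sketch below (PySpark, assuming an active <code class="language-plaintext highlighter-rouge">SparkSession</code> named <code class="language-plaintext highlighter-rouge">spark</code> and a toy edge list) shows the expected shape of the input and output. Note that, unlike the algorithms above, <code class="language-plaintext highlighter-rouge">PowerIterationClustering</code> is not used through <code class="language-plaintext highlighter-rouge">fit</code>/<code class="language-plaintext highlighter-rouge">transform</code>; clustering is run with its <code class="language-plaintext highlighter-rouge">assignClusters</code> method, which returns a DataFrame with an <code class="language-plaintext highlighter-rouge">id</code> column (one row per distinct vertex) and a <code class="language-plaintext highlighter-rouge">cluster</code> column:</p>
<div class="highlight"><pre class="codehilite"><code>from pyspark.ml.clustering import PowerIterationClustering

# A toy similarity graph as an edge list; column names match srcCol/dstCol/weightCol above
edges = spark.createDataFrame(
    [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 4, 1.0), (4, 0, 0.1)],
    ["src", "dst", "weight"])

pic = PowerIterationClustering(k=2, maxIter=20, initMode="degree", weightCol="weight")

# assignClusters consumes the edge list and returns the per-vertex cluster assignment
assignments = pic.assignClusters(edges)
assignments.orderBy("id").show()</code></pre></div>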
<p><strong>Examples</strong></p>
<div class="codetabs">
<div data-lang="python">
<p>Refer to the <a href="api/python/reference/api/pyspark.ml.clustering.PowerIterationClustering.html">Python API docs</a> for more details.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">from</span> <span class="nn">pyspark.ml.clustering</span> <span class="kn">import</span> <span class="n">PowerIterationClustering</span>
<span class="n">df</span> <span class="o">=</span> <span class="n">spark</span><span class="p">.</span><span class="n">createDataFrame</span><span class="p">([</span>
<span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">),</span>
<span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">),</span>
<span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">),</span>
<span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">),</span>
<span class="p">(</span><span class="mi">4</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mf">0.1</span><span class="p">)</span>
<span class="p">],</span> <span class="p">[</span><span class="s">"src"</span><span class="p">,</span> <span class="s">"dst"</span><span class="p">,</span> <span class="s">"weight"</span><span class="p">])</span>
<span class="n">pic</span> <span class="o">=</span> <span class="n">PowerIterationClustering</span><span class="p">(</span><span class="n">k</span><span class="o">=</span><span class="mi">2</span><span class="p">,</span> <span class="n">maxIter</span><span class="o">=</span><span class="mi">20</span><span class="p">,</span> <span class="n">initMode</span><span class="o">=</span><span class="s">"degree"</span><span class="p">,</span> <span class="n">weightCol</span><span class="o">=</span><span class="s">"weight"</span><span class="p">)</span>
<span class="c1"># Shows the cluster assignment
</span><span class="n">pic</span><span class="p">.</span><span class="n">assignClusters</span><span class="p">(</span><span class="n">df</span><span class="p">).</span><span class="n">show</span><span class="p">()</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/python/ml/power_iteration_clustering_example.py" in the Spark repo.</small></div>
</div>
<div data-lang="scala">
<p>Refer to the <a href="api/scala/org/apache/spark/ml/clustering/PowerIterationClustering.html">Scala API docs</a> for more details.</p>
<div class="highlight"><pre class="codehilite"><code><span class="k">import</span> <span class="nn">org.apache.spark.ml.clustering.PowerIterationClustering</span>
<span class="k">val</span> <span class="nv">dataset</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">createDataFrame</span><span class="o">(</span><span class="nc">Seq</span><span class="o">(</span>
<span class="o">(</span><span class="mi">0L</span><span class="o">,</span> <span class="mi">1L</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">),</span>
<span class="o">(</span><span class="mi">0L</span><span class="o">,</span> <span class="mi">2L</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">),</span>
<span class="o">(</span><span class="mi">1L</span><span class="o">,</span> <span class="mi">2L</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">),</span>
<span class="o">(</span><span class="mi">3L</span><span class="o">,</span> <span class="mi">4L</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">),</span>
<span class="o">(</span><span class="mi">4L</span><span class="o">,</span> <span class="mi">0L</span><span class="o">,</span> <span class="mf">0.1</span><span class="o">)</span>
<span class="o">)).</span><span class="py">toDF</span><span class="o">(</span><span class="s">"src"</span><span class="o">,</span> <span class="s">"dst"</span><span class="o">,</span> <span class="s">"weight"</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">model</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">PowerIterationClustering</span><span class="o">().</span>
<span class="nf">setK</span><span class="o">(</span><span class="mi">2</span><span class="o">).</span>
<span class="nf">setMaxIter</span><span class="o">(</span><span class="mi">20</span><span class="o">).</span>
<span class="nf">setInitMode</span><span class="o">(</span><span class="s">"degree"</span><span class="o">).</span>
<span class="nf">setWeightCol</span><span class="o">(</span><span class="s">"weight"</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">prediction</span> <span class="k">=</span> <span class="nv">model</span><span class="o">.</span><span class="py">assignClusters</span><span class="o">(</span><span class="n">dataset</span><span class="o">).</span><span class="py">select</span><span class="o">(</span><span class="s">"id"</span><span class="o">,</span> <span class="s">"cluster"</span><span class="o">)</span>
<span class="c1">// Shows the cluster assignment</span>
<span class="nv">prediction</span><span class="o">.</span><span class="py">show</span><span class="o">(</span><span class="kc">false</span><span class="o">)</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/scala/org/apache/spark/examples/ml/PowerIterationClusteringExample.scala" in the Spark repo.</small></div>
</div>
<div data-lang="java">
<p>Refer to the <a href="api/java/org/apache/spark/ml/clustering/PowerIterationClustering.html">Java API docs</a> for more details.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">import</span> <span class="nn">java.util.Arrays</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">java.util.List</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.clustering.PowerIterationClustering</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.Dataset</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.Row</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.RowFactory</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.SparkSession</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.types.DataTypes</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.types.Metadata</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.types.StructField</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.types.StructType</span><span class="o">;</span>
<span class="nc">List</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">data</span> <span class="o">=</span> <span class="nc">Arrays</span><span class="o">.</span><span class="na">asList</span><span class="o">(</span>
<span class="nc">RowFactory</span><span class="o">.</span><span class="na">create</span><span class="o">(</span><span class="mi">0L</span><span class="o">,</span> <span class="mi">1L</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">),</span>
<span class="nc">RowFactory</span><span class="o">.</span><span class="na">create</span><span class="o">(</span><span class="mi">0L</span><span class="o">,</span> <span class="mi">2L</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">),</span>
<span class="nc">RowFactory</span><span class="o">.</span><span class="na">create</span><span class="o">(</span><span class="mi">1L</span><span class="o">,</span> <span class="mi">2L</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">),</span>
<span class="nc">RowFactory</span><span class="o">.</span><span class="na">create</span><span class="o">(</span><span class="mi">3L</span><span class="o">,</span> <span class="mi">4L</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">),</span>
<span class="nc">RowFactory</span><span class="o">.</span><span class="na">create</span><span class="o">(</span><span class="mi">4L</span><span class="o">,</span> <span class="mi">0L</span><span class="o">,</span> <span class="mf">0.1</span><span class="o">)</span>
<span class="o">);</span>
<span class="nc">StructType</span> <span class="n">schema</span> <span class="o">=</span> <span class="k">new</span> <span class="nc">StructType</span><span class="o">(</span><span class="k">new</span> <span class="nc">StructField</span><span class="o">[]{</span>
<span class="k">new</span> <span class="nf">StructField</span><span class="o">(</span><span class="s">"src"</span><span class="o">,</span> <span class="nc">DataTypes</span><span class="o">.</span><span class="na">LongType</span><span class="o">,</span> <span class="kc">false</span><span class="o">,</span> <span class="nc">Metadata</span><span class="o">.</span><span class="na">empty</span><span class="o">()),</span>
<span class="k">new</span> <span class="nf">StructField</span><span class="o">(</span><span class="s">"dst"</span><span class="o">,</span> <span class="nc">DataTypes</span><span class="o">.</span><span class="na">LongType</span><span class="o">,</span> <span class="kc">false</span><span class="o">,</span> <span class="nc">Metadata</span><span class="o">.</span><span class="na">empty</span><span class="o">()),</span>
<span class="k">new</span> <span class="nf">StructField</span><span class="o">(</span><span class="s">"weight"</span><span class="o">,</span> <span class="nc">DataTypes</span><span class="o">.</span><span class="na">DoubleType</span><span class="o">,</span> <span class="kc">false</span><span class="o">,</span> <span class="nc">Metadata</span><span class="o">.</span><span class="na">empty</span><span class="o">())</span>
<span class="o">});</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">df</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="na">createDataFrame</span><span class="o">(</span><span class="n">data</span><span class="o">,</span> <span class="n">schema</span><span class="o">);</span>
<span class="nc">PowerIterationClustering</span> <span class="n">model</span> <span class="o">=</span> <span class="k">new</span> <span class="nc">PowerIterationClustering</span><span class="o">()</span>
<span class="o">.</span><span class="na">setK</span><span class="o">(</span><span class="mi">2</span><span class="o">)</span>
<span class="o">.</span><span class="na">setMaxIter</span><span class="o">(</span><span class="mi">10</span><span class="o">)</span>
<span class="o">.</span><span class="na">setInitMode</span><span class="o">(</span><span class="s">"degree"</span><span class="o">)</span>
<span class="o">.</span><span class="na">setWeightCol</span><span class="o">(</span><span class="s">"weight"</span><span class="o">);</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">result</span> <span class="o">=</span> <span class="n">model</span><span class="o">.</span><span class="na">assignClusters</span><span class="o">(</span><span class="n">df</span><span class="o">);</span>
<span class="n">result</span><span class="o">.</span><span class="na">show</span><span class="o">(</span><span class="kc">false</span><span class="o">);</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/java/org/apache/spark/examples/ml/JavaPowerIterationClusteringExample.java" in the Spark repo.</small></div>
</div>
<div data-lang="r">
<p>Refer to the <a href="api/R/reference/spark.powerIterationClustering.html">R API docs</a> for more details.</p>
<div class="highlight"><pre class="codehilite"><code><span class="n">df</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">createDataFrame</span><span class="p">(</span><span class="nf">list</span><span class="p">(</span><span class="nf">list</span><span class="p">(</span><span class="m">0L</span><span class="p">,</span><span class="w"> </span><span class="m">1L</span><span class="p">,</span><span class="w"> </span><span class="m">1.0</span><span class="p">),</span><span class="w"> </span><span class="nf">list</span><span class="p">(</span><span class="m">0L</span><span class="p">,</span><span class="w"> </span><span class="m">2L</span><span class="p">,</span><span class="w"> </span><span class="m">1.0</span><span class="p">),</span><span class="w">
</span><span class="nf">list</span><span class="p">(</span><span class="m">1L</span><span class="p">,</span><span class="w"> </span><span class="m">2L</span><span class="p">,</span><span class="w"> </span><span class="m">1.0</span><span class="p">),</span><span class="w"> </span><span class="nf">list</span><span class="p">(</span><span class="m">3L</span><span class="p">,</span><span class="w"> </span><span class="m">4L</span><span class="p">,</span><span class="w"> </span><span class="m">1.0</span><span class="p">),</span><span class="w">
</span><span class="nf">list</span><span class="p">(</span><span class="m">4L</span><span class="p">,</span><span class="w"> </span><span class="m">0L</span><span class="p">,</span><span class="w"> </span><span class="m">0.1</span><span class="p">)),</span><span class="w">
</span><span class="n">schema</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="nf">c</span><span class="p">(</span><span class="s2">"src"</span><span class="p">,</span><span class="w"> </span><span class="s2">"dst"</span><span class="p">,</span><span class="w"> </span><span class="s2">"weight"</span><span class="p">))</span><span class="w">
</span><span class="c1"># assign clusters</span><span class="w">
</span><span class="n">clusters</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">spark.assignClusters</span><span class="p">(</span><span class="n">df</span><span class="p">,</span><span class="w"> </span><span class="n">k</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="m">2L</span><span class="p">,</span><span class="w"> </span><span class="n">maxIter</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="m">20L</span><span class="p">,</span><span class="w">
</span><span class="n">initMode</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="s2">"degree"</span><span class="p">,</span><span class="w"> </span><span class="n">weightCol</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="s2">"weight"</span><span class="p">)</span><span class="w">
</span><span class="n">showDF</span><span class="p">(</span><span class="n">arrange</span><span class="p">(</span><span class="n">clusters</span><span class="p">,</span><span class="w"> </span><span class="n">clusters</span><span class="o">$</span><span class="n">id</span><span class="p">))</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/r/ml/powerIterationClustering.R" in the Spark repo.</small></div>
</div>
</div>
</div>
<!-- /container -->
</div>
<script src="js/vendor/jquery-3.5.1.min.js"></script>
<script src="js/vendor/bootstrap.bundle.min.js"></script>
<script src="js/vendor/anchor.min.js"></script>
<script src="js/main.js"></script>
<script type="text/javascript" src="js/vendor/docsearch.min.js"></script>
<script type="text/javascript">
// DocSearch is entirely free and automated. DocSearch is built in two parts:
// 1. a crawler which we run on our own infrastructure every 24 hours. It follows every link
// in your website and extracts content from every page it traverses. It then pushes this
// content to an Algolia index.
// 2. a JavaScript snippet to be inserted in your website that will bind this Algolia index
// to your search input and display its results in a dropdown UI. If you want to find more
// details on how DocSearch works, check the DocSearch docs.
docsearch({
apiKey: 'd62f962a82bc9abb53471cb7b89da35e',
appId: 'RAI69RXRSK',
indexName: 'apache_spark',
inputSelector: '#docsearch-input',
enhancedSearchInput: true,
algoliaOptions: {
'facetFilters': ["version:3.5.0"]
},
debug: false // Set debug to true if you want to inspect the dropdown
});
</script>
<!-- MathJax Section -->
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
TeX: { equationNumbers: { autoNumber: "AMS" } }
});
</script>
<script>
// Note that we load MathJax this way to work with local file (file://), HTTP and HTTPS.
// We could use "//cdn.mathjax...", but that won't support "file://".
(function(d, script) {
script = d.createElement('script');
script.type = 'text/javascript';
script.async = true;
script.onload = function(){
MathJax.Hub.Config({
tex2jax: {
inlineMath: [ ["$", "$"], ["\\\\(","\\\\)"] ],
displayMath: [ ["$$","$$"], ["\\[", "\\]"] ],
processEscapes: true,
skipTags: ['script', 'noscript', 'style', 'textarea', 'pre']
}
});
};
script.src = ('https:' == document.location.protocol ? 'https://' : 'http://') +
'cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js' +
'?config=TeX-AMS-MML_HTMLorMML';
d.getElementsByTagName('head')[0].appendChild(script);
}(document));
</script>
</body>
</html>