<!DOCTYPE html>
<!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7"> <![endif]-->
<!--[if IE 7]> <html class="no-js lt-ie9 lt-ie8"> <![endif]-->
<!--[if IE 8]> <html class="no-js lt-ie9"> <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js"> <!--<![endif]-->
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>ML Tuning - Spark 3.5.0 Documentation</title>
<link rel="stylesheet" href="css/bootstrap.min.css">
<link rel="preconnect" href="https://fonts.googleapis.com">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link href="https://fonts.googleapis.com/css2?family=DM+Sans:ital,wght@0,400;0,500;0,700;1,400;1,500;1,700&Courier+Prime:wght@400;700&display=swap" rel="stylesheet">
<link href="css/custom.css" rel="stylesheet">
<script src="js/vendor/modernizr-2.6.1-respond-1.1.0.min.js"></script>
<link rel="stylesheet" href="css/pygments-default.css">
<link rel="stylesheet" href="css/docsearch.min.css" />
<link rel="stylesheet" href="css/docsearch.css">
<!-- Matomo -->
<script type="text/javascript">
var _paq = window._paq = window._paq || [];
/* tracker methods like "setCustomDimension" should be called before "trackPageView" */
_paq.push(["disableCookies"]);
_paq.push(['trackPageView']);
_paq.push(['enableLinkTracking']);
(function() {
var u="https://analytics.apache.org/";
_paq.push(['setTrackerUrl', u+'matomo.php']);
_paq.push(['setSiteId', '40']);
var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
g.async=true; g.src=u+'matomo.js'; s.parentNode.insertBefore(g,s);
})();
</script>
<!-- End Matomo Code -->
</head>
<body class="global">
<!--[if lt IE 7]>
<p class="chromeframe">You are using an outdated browser. <a href="https://browsehappy.com/">Upgrade your browser today</a> or <a href="http://www.google.com/chromeframe/?redirect=true">install Google Chrome Frame</a> to better experience this site.</p>
<![endif]-->
<!-- This code is taken from http://twitter.github.com/bootstrap/examples/hero.html -->
<nav class="navbar navbar-expand-lg navbar-dark p-0 px-4 fixed-top" style="background: #1d6890;" id="topbar">
<div class="navbar-brand"><a href="index.html">
<img src="img/spark-logo-rev.svg" width="141" height="72"/></a><span class="version">3.5.0</span>
</div>
<button class="navbar-toggler" type="button" data-toggle="collapse"
data-target="#navbarCollapse" aria-controls="navbarCollapse"
aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarCollapse">
<ul class="navbar-nav me-auto">
<li class="nav-item"><a href="index.html" class="nav-link">Overview</a></li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarQuickStart" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Programming Guides</a>
<div class="dropdown-menu" aria-labelledby="navbarQuickStart">
<a class="dropdown-item" href="quick-start.html">Quick Start</a>
<a class="dropdown-item" href="rdd-programming-guide.html">RDDs, Accumulators, Broadcasts Vars</a>
<a class="dropdown-item" href="sql-programming-guide.html">SQL, DataFrames, and Datasets</a>
<a class="dropdown-item" href="structured-streaming-programming-guide.html">Structured Streaming</a>
<a class="dropdown-item" href="streaming-programming-guide.html">Spark Streaming (DStreams)</a>
<a class="dropdown-item" href="ml-guide.html">MLlib (Machine Learning)</a>
<a class="dropdown-item" href="graphx-programming-guide.html">GraphX (Graph Processing)</a>
<a class="dropdown-item" href="sparkr.html">SparkR (R on Spark)</a>
<a class="dropdown-item" href="api/python/getting_started/index.html">PySpark (Python on Spark)</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarAPIDocs" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">API Docs</a>
<div class="dropdown-menu" aria-labelledby="navbarAPIDocs">
<a class="dropdown-item" href="api/scala/org/apache/spark/index.html">Scala</a>
<a class="dropdown-item" href="api/java/index.html">Java</a>
<a class="dropdown-item" href="api/python/index.html">Python</a>
<a class="dropdown-item" href="api/R/index.html">R</a>
<a class="dropdown-item" href="api/sql/index.html">SQL, Built-in Functions</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarDeploying" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Deploying</a>
<div class="dropdown-menu" aria-labelledby="navbarDeploying">
<a class="dropdown-item" href="cluster-overview.html">Overview</a>
<a class="dropdown-item" href="submitting-applications.html">Submitting Applications</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="spark-standalone.html">Spark Standalone</a>
<a class="dropdown-item" href="running-on-mesos.html">Mesos</a>
<a class="dropdown-item" href="running-on-yarn.html">YARN</a>
<a class="dropdown-item" href="running-on-kubernetes.html">Kubernetes</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarMore" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">More</a>
<div class="dropdown-menu" aria-labelledby="navbarMore">
<a class="dropdown-item" href="configuration.html">Configuration</a>
<a class="dropdown-item" href="monitoring.html">Monitoring</a>
<a class="dropdown-item" href="tuning.html">Tuning Guide</a>
<a class="dropdown-item" href="job-scheduling.html">Job Scheduling</a>
<a class="dropdown-item" href="security.html">Security</a>
<a class="dropdown-item" href="hardware-provisioning.html">Hardware Provisioning</a>
<a class="dropdown-item" href="migration-guide.html">Migration Guide</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="building-spark.html">Building Spark</a>
<a class="dropdown-item" href="https://spark.apache.org/contributing.html">Contributing to Spark</a>
<a class="dropdown-item" href="https://spark.apache.org/third-party-projects.html">Third Party Projects</a>
</div>
</li>
<li class="nav-item">
<input type="text" id="docsearch-input" placeholder="Search the docs…">
</li>
</ul>
<!--<span class="navbar-text navbar-right"><span class="version-text">v3.5.0</span></span>-->
</div>
</nav>
<div class="container">
<div class="left-menu-wrapper">
<div class="left-menu">
<h3><a href="ml-guide.html">MLlib: Main Guide</a></h3>
<ul>
<li>
<a href="ml-statistics.html">
Basic statistics
</a>
</li>
<li>
<a href="ml-datasource.html">
Data sources
</a>
</li>
<li>
<a href="ml-pipeline.html">
Pipelines
</a>
</li>
<li>
<a href="ml-features.html">
Extracting, transforming and selecting features
</a>
</li>
<li>
<a href="ml-classification-regression.html">
Classification and Regression
</a>
</li>
<li>
<a href="ml-clustering.html">
Clustering
</a>
</li>
<li>
<a href="ml-collaborative-filtering.html">
Collaborative filtering
</a>
</li>
<li>
<a href="ml-frequent-pattern-mining.html">
Frequent Pattern Mining
</a>
</li>
<li>
<a href="ml-tuning.html">
Model selection and tuning
</a>
</li>
<li>
<a href="ml-advanced.html">
Advanced topics
</a>
</li>
</ul>
<h3><a href="mllib-guide.html">MLlib: RDD-based API Guide</a></h3>
<ul>
<li>
<a href="mllib-data-types.html">
Data types
</a>
</li>
<li>
<a href="mllib-statistics.html">
Basic statistics
</a>
</li>
<li>
<a href="mllib-classification-regression.html">
Classification and regression
</a>
</li>
<li>
<a href="mllib-collaborative-filtering.html">
Collaborative filtering
</a>
</li>
<li>
<a href="mllib-clustering.html">
Clustering
</a>
</li>
<li>
<a href="mllib-dimensionality-reduction.html">
Dimensionality reduction
</a>
</li>
<li>
<a href="mllib-feature-extraction.html">
Feature extraction and transformation
</a>
</li>
<li>
<a href="mllib-frequent-pattern-mining.html">
Frequent pattern mining
</a>
</li>
<li>
<a href="mllib-evaluation-metrics.html">
Evaluation metrics
</a>
</li>
<li>
<a href="mllib-pmml-model-export.html">
PMML model export
</a>
</li>
<li>
<a href="mllib-optimization.html">
Optimization (developer)
</a>
</li>
</ul>
</div>
</div>
<input id="nav-trigger" class="nav-trigger" checked type="checkbox">
<label for="nav-trigger"></label>
<div class="content-with-sidebar mr-3" id="content">
<h1 class="title">ML Tuning: model selection and hyperparameter tuning</h1>
<p><code class="language-plaintext highlighter-rouge">\[
\newcommand{\R}{\mathbb{R}}
\newcommand{\E}{\mathbb{E}}
\newcommand{\x}{\mathbf{x}}
\newcommand{\y}{\mathbf{y}}
\newcommand{\wv}{\mathbf{w}}
\newcommand{\av}{\mathbf{\alpha}}
\newcommand{\bv}{\mathbf{b}}
\newcommand{\N}{\mathbb{N}}
\newcommand{\id}{\mathbf{I}}
\newcommand{\ind}{\mathbf{1}}
\newcommand{\0}{\mathbf{0}}
\newcommand{\unit}{\mathbf{e}}
\newcommand{\one}{\mathbf{1}}
\newcommand{\zero}{\mathbf{0}}
\]</code></p>
<p>This section describes how to use MLlib&#8217;s tooling for tuning ML algorithms and Pipelines.
Built-in Cross-Validation and other tooling allow users to optimize hyperparameters in algorithms and Pipelines.</p>
<p><strong>Table of contents</strong></p>
<ul id="markdown-toc">
<li><a href="#model-selection-aka-hyperparameter-tuning" id="markdown-toc-model-selection-aka-hyperparameter-tuning">Model selection (a.k.a. hyperparameter tuning)</a></li>
<li><a href="#cross-validation" id="markdown-toc-cross-validation">Cross-Validation</a></li>
<li><a href="#train-validation-split" id="markdown-toc-train-validation-split">Train-Validation Split</a></li>
</ul>
<h1 id="model-selection-aka-hyperparameter-tuning">Model selection (a.k.a. hyperparameter tuning)</h1>
<p>An important task in ML is <em>model selection</em>, or using data to find the best model or parameters for a given task. This is also called <em>tuning</em>.
Tuning may be done for individual <code class="language-plaintext highlighter-rouge">Estimator</code>s such as <code class="language-plaintext highlighter-rouge">LogisticRegression</code>, or for entire <code class="language-plaintext highlighter-rouge">Pipeline</code>s which include multiple algorithms, featurization, and other steps. Users can tune an entire <code class="language-plaintext highlighter-rouge">Pipeline</code> at once, rather than tuning each element in the <code class="language-plaintext highlighter-rouge">Pipeline</code> separately.</p>
<p>MLlib supports model selection using tools such as <a href="api/scala/org/apache/spark/ml/tuning/CrossValidator.html"><code class="language-plaintext highlighter-rouge">CrossValidator</code></a> and <a href="api/scala/org/apache/spark/ml/tuning/TrainValidationSplit.html"><code class="language-plaintext highlighter-rouge">TrainValidationSplit</code></a>.
These tools require the following items:</p>
<ul>
<li><a href="api/scala/org/apache/spark/ml/Estimator.html"><code class="language-plaintext highlighter-rouge">Estimator</code></a>: algorithm or <code class="language-plaintext highlighter-rouge">Pipeline</code> to tune</li>
<li>Set of <code class="language-plaintext highlighter-rouge">ParamMap</code>s: parameters to choose from, sometimes called a &#8220;parameter grid&#8221; to search over</li>
<li><a href="api/scala/org/apache/spark/ml/evaluation/Evaluator.html"><code class="language-plaintext highlighter-rouge">Evaluator</code></a>: metric to measure how well a fitted <code class="language-plaintext highlighter-rouge">Model</code> does on held-out test data</li>
</ul>
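<p>As a minimal Python sketch of these three items (the particular <code class="language-plaintext highlighter-rouge">Estimator</code> and parameter values below are illustrative assumptions, not a recommended setup):</p>
<div class="highlight"><pre class="codehilite"><code>from pyspark.ml.classification import LogisticRegression
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml.tuning import ParamGridBuilder

# Estimator: the algorithm (or Pipeline) to tune.
lr = LogisticRegression(maxIter=10)

# Set of ParamMaps: the "parameter grid" to search over.
paramGrid = ParamGridBuilder() \
    .addGrid(lr.regParam, [0.1, 0.01]) \
    .build()

# Evaluator: the metric used to compare fitted Models on held-out data.
evaluator = BinaryClassificationEvaluator()</code></pre></div>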
<p>At a high level, these model selection tools work as follows:</p>
<ul>
<li>They split the input data into separate training and test datasets.</li>
<li>For each (training, test) pair, they iterate through the set of <code class="language-plaintext highlighter-rouge">ParamMap</code>s:
<ul>
<li>For each <code class="language-plaintext highlighter-rouge">ParamMap</code>, they fit the <code class="language-plaintext highlighter-rouge">Estimator</code> using those parameters, get the fitted <code class="language-plaintext highlighter-rouge">Model</code>, and evaluate the <code class="language-plaintext highlighter-rouge">Model</code>&#8217;s performance using the <code class="language-plaintext highlighter-rouge">Evaluator</code>.</li>
</ul>
</li>
<li>They select the <code class="language-plaintext highlighter-rouge">Model</code> produced by the best-performing set of parameters.</li>
</ul>
<p>The <code class="language-plaintext highlighter-rouge">Evaluator</code> can be a <a href="api/scala/org/apache/spark/ml/evaluation/RegressionEvaluator.html"><code class="language-plaintext highlighter-rouge">RegressionEvaluator</code></a>
for regression problems, a <a href="api/scala/org/apache/spark/ml/evaluation/BinaryClassificationEvaluator.html"><code class="language-plaintext highlighter-rouge">BinaryClassificationEvaluator</code></a>
for binary data, a <a href="api/scala/org/apache/spark/ml/evaluation/MulticlassClassificationEvaluator.html"><code class="language-plaintext highlighter-rouge">MulticlassClassificationEvaluator</code></a>
for multiclass problems, a <a href="api/scala/org/apache/spark/ml/evaluation/MultilabelClassificationEvaluator.html"><code class="language-plaintext highlighter-rouge">MultilabelClassificationEvaluator</code></a>
for multi-label problems, or a
<a href="api/scala/org/apache/spark/ml/evaluation/RankingEvaluator.html"><code class="language-plaintext highlighter-rouge">RankingEvaluator</code></a> for ranking problems. The default metric used to
choose the best <code class="language-plaintext highlighter-rouge">ParamMap</code> can be overridden by the <code class="language-plaintext highlighter-rouge">setMetricName</code> method in each of these evaluators.</p>
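<p>For instance, the metric can be supplied at construction time or set afterwards with <code class="language-plaintext highlighter-rouge">setMetricName</code>. A short Python sketch using two metric names these evaluators support:</p>
<div class="highlight"><pre class="codehilite"><code>from pyspark.ml.evaluation import BinaryClassificationEvaluator, MulticlassClassificationEvaluator

# Override the binary default (areaUnderROC) with area under the precision-recall curve.
binaryEvaluator = BinaryClassificationEvaluator().setMetricName("areaUnderPR")

# Override the multiclass default (f1) with plain accuracy.
multiclassEvaluator = MulticlassClassificationEvaluator(metricName="accuracy")</code></pre></div>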
<p>To help construct the parameter grid, users can use the <a href="api/scala/org/apache/spark/ml/tuning/ParamGridBuilder.html"><code class="language-plaintext highlighter-rouge">ParamGridBuilder</code></a> utility.
By default, sets of parameters from the parameter grid are evaluated serially. Parameter evaluation can be done in parallel by setting <code class="language-plaintext highlighter-rouge">parallelism</code> to a value of 2 or more (a value of 1 is serial) before running model selection with <code class="language-plaintext highlighter-rouge">CrossValidator</code> or <code class="language-plaintext highlighter-rouge">TrainValidationSplit</code>.
The value of <code class="language-plaintext highlighter-rouge">parallelism</code> should be chosen carefully to maximize parallelism without exceeding cluster resources, and larger values may not always lead to improved performance. Generally speaking, a value up to 10 should be sufficient for most clusters.</p>
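<p>For illustration, a Python sketch of setting <code class="language-plaintext highlighter-rouge">parallelism</code> on a <code class="language-plaintext highlighter-rouge">CrossValidator</code>; it assumes <code class="language-plaintext highlighter-rouge">lr</code>, <code class="language-plaintext highlighter-rouge">paramGrid</code>, and <code class="language-plaintext highlighter-rouge">evaluator</code> were defined as in the sketch above:</p>
<div class="highlight"><pre class="codehilite"><code>from pyspark.ml.tuning import CrossValidator

# Evaluate up to 4 parameter settings in parallel; parallelism=1 (the default) is serial.
cv = CrossValidator(estimator=lr,
                    estimatorParamMaps=paramGrid,
                    evaluator=evaluator,
                    numFolds=3,
                    parallelism=4)</code></pre></div>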
<h1 id="cross-validation">Cross-Validation</h1>
<p><code class="language-plaintext highlighter-rouge">CrossValidator</code> begins by splitting the dataset into a set of <em>folds</em> which are used as separate training and test datasets. E.g., with <code class="language-plaintext highlighter-rouge">$k=3$</code> folds, <code class="language-plaintext highlighter-rouge">CrossValidator</code> will generate 3 (training, test) dataset pairs, each of which uses 2/3 of the data for training and 1/3 for testing. To evaluate a particular <code class="language-plaintext highlighter-rouge">ParamMap</code>, <code class="language-plaintext highlighter-rouge">CrossValidator</code> computes the average evaluation metric for the 3 <code class="language-plaintext highlighter-rouge">Model</code>s produced by fitting the <code class="language-plaintext highlighter-rouge">Estimator</code> on the 3 different (training, test) dataset pairs.</p>
<p>After identifying the best <code class="language-plaintext highlighter-rouge">ParamMap</code>, <code class="language-plaintext highlighter-rouge">CrossValidator</code> finally re-fits the <code class="language-plaintext highlighter-rouge">Estimator</code> using the best <code class="language-plaintext highlighter-rouge">ParamMap</code> and the entire dataset.</p>
<p><strong>Examples: model selection via cross-validation</strong></p>
<p>The following example demonstrates using <code class="language-plaintext highlighter-rouge">CrossValidator</code> to select from a grid of parameters.</p>
<p>Note that cross-validation over a grid of parameters is expensive.
E.g., in the example below, the parameter grid has 3 values for <code class="language-plaintext highlighter-rouge">hashingTF.numFeatures</code> and 2 values for <code class="language-plaintext highlighter-rouge">lr.regParam</code>, and <code class="language-plaintext highlighter-rouge">CrossValidator</code> uses 2 folds. This multiplies out to <code class="language-plaintext highlighter-rouge">$(3 \times 2) \times 2 = 12$</code> different models being trained.
In realistic settings, it can be common to try many more parameters and use more folds (<code class="language-plaintext highlighter-rouge">$k=3$</code> and <code class="language-plaintext highlighter-rouge">$k=10$</code> are common).
In other words, using <code class="language-plaintext highlighter-rouge">CrossValidator</code> can be very expensive.
However, it is also a well-established method for choosing parameters that is more statistically sound than heuristic hand-tuning.</p>
<div class="codetabs">
<div data-lang="python">
<p>Refer to the <a href="api/python/reference/api/pyspark.ml.tuning.CrossValidator.html"><code class="language-plaintext highlighter-rouge">CrossValidator</code> Python docs</a> for more details on the API.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">from</span> <span class="nn">pyspark.ml</span> <span class="kn">import</span> <span class="n">Pipeline</span>
<span class="kn">from</span> <span class="nn">pyspark.ml.classification</span> <span class="kn">import</span> <span class="n">LogisticRegression</span>
<span class="kn">from</span> <span class="nn">pyspark.ml.evaluation</span> <span class="kn">import</span> <span class="n">BinaryClassificationEvaluator</span>
<span class="kn">from</span> <span class="nn">pyspark.ml.feature</span> <span class="kn">import</span> <span class="n">HashingTF</span><span class="p">,</span> <span class="n">Tokenizer</span>
<span class="kn">from</span> <span class="nn">pyspark.ml.tuning</span> <span class="kn">import</span> <span class="n">CrossValidator</span><span class="p">,</span> <span class="n">ParamGridBuilder</span>
<span class="c1"># Prepare training documents, which are labeled.
</span><span class="n">training</span> <span class="o">=</span> <span class="n">spark</span><span class="p">.</span><span class="n">createDataFrame</span><span class="p">([</span>
<span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="s">"a b c d e spark"</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">),</span>
<span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="s">"b d"</span><span class="p">,</span> <span class="mf">0.0</span><span class="p">),</span>
<span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="s">"spark f g h"</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">),</span>
<span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="s">"hadoop mapreduce"</span><span class="p">,</span> <span class="mf">0.0</span><span class="p">),</span>
<span class="p">(</span><span class="mi">4</span><span class="p">,</span> <span class="s">"b spark who"</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">),</span>
<span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="s">"g d a y"</span><span class="p">,</span> <span class="mf">0.0</span><span class="p">),</span>
<span class="p">(</span><span class="mi">6</span><span class="p">,</span> <span class="s">"spark fly"</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">),</span>
<span class="p">(</span><span class="mi">7</span><span class="p">,</span> <span class="s">"was mapreduce"</span><span class="p">,</span> <span class="mf">0.0</span><span class="p">),</span>
<span class="p">(</span><span class="mi">8</span><span class="p">,</span> <span class="s">"e spark program"</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">),</span>
<span class="p">(</span><span class="mi">9</span><span class="p">,</span> <span class="s">"a e c l"</span><span class="p">,</span> <span class="mf">0.0</span><span class="p">),</span>
<span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="s">"spark compile"</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">),</span>
<span class="p">(</span><span class="mi">11</span><span class="p">,</span> <span class="s">"hadoop software"</span><span class="p">,</span> <span class="mf">0.0</span><span class="p">)</span>
<span class="p">],</span> <span class="p">[</span><span class="s">"id"</span><span class="p">,</span> <span class="s">"text"</span><span class="p">,</span> <span class="s">"label"</span><span class="p">])</span>
<span class="c1"># Configure an ML pipeline, which consists of tree stages: tokenizer, hashingTF, and lr.
</span><span class="n">tokenizer</span> <span class="o">=</span> <span class="n">Tokenizer</span><span class="p">(</span><span class="n">inputCol</span><span class="o">=</span><span class="s">"text"</span><span class="p">,</span> <span class="n">outputCol</span><span class="o">=</span><span class="s">"words"</span><span class="p">)</span>
<span class="n">hashingTF</span> <span class="o">=</span> <span class="n">HashingTF</span><span class="p">(</span><span class="n">inputCol</span><span class="o">=</span><span class="n">tokenizer</span><span class="p">.</span><span class="n">getOutputCol</span><span class="p">(),</span> <span class="n">outputCol</span><span class="o">=</span><span class="s">"features"</span><span class="p">)</span>
<span class="n">lr</span> <span class="o">=</span> <span class="n">LogisticRegression</span><span class="p">(</span><span class="n">maxIter</span><span class="o">=</span><span class="mi">10</span><span class="p">)</span>
<span class="n">pipeline</span> <span class="o">=</span> <span class="n">Pipeline</span><span class="p">(</span><span class="n">stages</span><span class="o">=</span><span class="p">[</span><span class="n">tokenizer</span><span class="p">,</span> <span class="n">hashingTF</span><span class="p">,</span> <span class="n">lr</span><span class="p">])</span>
<span class="c1"># We now treat the Pipeline as an Estimator, wrapping it in a CrossValidator instance.
# This will allow us to jointly choose parameters for all Pipeline stages.
# A CrossValidator requires an Estimator, a set of Estimator ParamMaps, and an Evaluator.
# We use a ParamGridBuilder to construct a grid of parameters to search over.
# With 3 values for hashingTF.numFeatures and 2 values for lr.regParam,
# this grid will have 3 x 2 = 6 parameter settings for CrossValidator to choose from.
</span><span class="n">paramGrid</span> <span class="o">=</span> <span class="n">ParamGridBuilder</span><span class="p">()</span> \
<span class="p">.</span><span class="n">addGrid</span><span class="p">(</span><span class="n">hashingTF</span><span class="p">.</span><span class="n">numFeatures</span><span class="p">,</span> <span class="p">[</span><span class="mi">10</span><span class="p">,</span> <span class="mi">100</span><span class="p">,</span> <span class="mi">1000</span><span class="p">])</span> \
<span class="p">.</span><span class="n">addGrid</span><span class="p">(</span><span class="n">lr</span><span class="p">.</span><span class="n">regParam</span><span class="p">,</span> <span class="p">[</span><span class="mf">0.1</span><span class="p">,</span> <span class="mf">0.01</span><span class="p">])</span> \
<span class="p">.</span><span class="n">build</span><span class="p">()</span>
<span class="n">crossval</span> <span class="o">=</span> <span class="n">CrossValidator</span><span class="p">(</span><span class="n">estimator</span><span class="o">=</span><span class="n">pipeline</span><span class="p">,</span>
<span class="n">estimatorParamMaps</span><span class="o">=</span><span class="n">paramGrid</span><span class="p">,</span>
<span class="n">evaluator</span><span class="o">=</span><span class="n">BinaryClassificationEvaluator</span><span class="p">(),</span>
<span class="n">numFolds</span><span class="o">=</span><span class="mi">2</span><span class="p">)</span> <span class="c1"># use 3+ folds in practice
</span>
<span class="c1"># Run cross-validation, and choose the best set of parameters.
</span><span class="n">cvModel</span> <span class="o">=</span> <span class="n">crossval</span><span class="p">.</span><span class="n">fit</span><span class="p">(</span><span class="n">training</span><span class="p">)</span>
<span class="c1"># Prepare test documents, which are unlabeled.
</span><span class="n">test</span> <span class="o">=</span> <span class="n">spark</span><span class="p">.</span><span class="n">createDataFrame</span><span class="p">([</span>
<span class="p">(</span><span class="mi">4</span><span class="p">,</span> <span class="s">"spark i j k"</span><span class="p">),</span>
<span class="p">(</span><span class="mi">5</span><span class="p">,</span> <span class="s">"l m n"</span><span class="p">),</span>
<span class="p">(</span><span class="mi">6</span><span class="p">,</span> <span class="s">"mapreduce spark"</span><span class="p">),</span>
<span class="p">(</span><span class="mi">7</span><span class="p">,</span> <span class="s">"apache hadoop"</span><span class="p">)</span>
<span class="p">],</span> <span class="p">[</span><span class="s">"id"</span><span class="p">,</span> <span class="s">"text"</span><span class="p">])</span>
<span class="c1"># Make predictions on test documents. cvModel uses the best model found (lrModel).
</span><span class="n">prediction</span> <span class="o">=</span> <span class="n">cvModel</span><span class="p">.</span><span class="n">transform</span><span class="p">(</span><span class="n">test</span><span class="p">)</span>
<span class="n">selected</span> <span class="o">=</span> <span class="n">prediction</span><span class="p">.</span><span class="n">select</span><span class="p">(</span><span class="s">"id"</span><span class="p">,</span> <span class="s">"text"</span><span class="p">,</span> <span class="s">"probability"</span><span class="p">,</span> <span class="s">"prediction"</span><span class="p">)</span>
<span class="k">for</span> <span class="n">row</span> <span class="ow">in</span> <span class="n">selected</span><span class="p">.</span><span class="n">collect</span><span class="p">():</span>
<span class="k">print</span><span class="p">(</span><span class="n">row</span><span class="p">)</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/python/ml/cross_validator.py" in the Spark repo.</small></div>
</div>
<div data-lang="scala">
<p>Refer to the <a href="api/scala/org/apache/spark/ml/tuning/CrossValidator.html"><code class="language-plaintext highlighter-rouge">CrossValidator</code> Scala docs</a> for details on the API.</p>
<div class="highlight"><pre class="codehilite"><code><span class="k">import</span> <span class="nn">org.apache.spark.ml.Pipeline</span>
<span class="k">import</span> <span class="nn">org.apache.spark.ml.classification.LogisticRegression</span>
<span class="k">import</span> <span class="nn">org.apache.spark.ml.evaluation.BinaryClassificationEvaluator</span>
<span class="k">import</span> <span class="nn">org.apache.spark.ml.feature.</span><span class="o">{</span><span class="nc">HashingTF</span><span class="o">,</span> <span class="nc">Tokenizer</span><span class="o">}</span>
<span class="k">import</span> <span class="nn">org.apache.spark.ml.linalg.Vector</span>
<span class="k">import</span> <span class="nn">org.apache.spark.ml.tuning.</span><span class="o">{</span><span class="nc">CrossValidator</span><span class="o">,</span> <span class="nc">ParamGridBuilder</span><span class="o">}</span>
<span class="k">import</span> <span class="nn">org.apache.spark.sql.Row</span>
<span class="c1">// Prepare training data from a list of (id, text, label) tuples.</span>
<span class="k">val</span> <span class="nv">training</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">createDataFrame</span><span class="o">(</span><span class="nc">Seq</span><span class="o">(</span>
<span class="o">(</span><span class="mi">0L</span><span class="o">,</span> <span class="s">"a b c d e spark"</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">),</span>
<span class="o">(</span><span class="mi">1L</span><span class="o">,</span> <span class="s">"b d"</span><span class="o">,</span> <span class="mf">0.0</span><span class="o">),</span>
<span class="o">(</span><span class="mi">2L</span><span class="o">,</span> <span class="s">"spark f g h"</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">),</span>
<span class="o">(</span><span class="mi">3L</span><span class="o">,</span> <span class="s">"hadoop mapreduce"</span><span class="o">,</span> <span class="mf">0.0</span><span class="o">),</span>
<span class="o">(</span><span class="mi">4L</span><span class="o">,</span> <span class="s">"b spark who"</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">),</span>
<span class="o">(</span><span class="mi">5L</span><span class="o">,</span> <span class="s">"g d a y"</span><span class="o">,</span> <span class="mf">0.0</span><span class="o">),</span>
<span class="o">(</span><span class="mi">6L</span><span class="o">,</span> <span class="s">"spark fly"</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">),</span>
<span class="o">(</span><span class="mi">7L</span><span class="o">,</span> <span class="s">"was mapreduce"</span><span class="o">,</span> <span class="mf">0.0</span><span class="o">),</span>
<span class="o">(</span><span class="mi">8L</span><span class="o">,</span> <span class="s">"e spark program"</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">),</span>
<span class="o">(</span><span class="mi">9L</span><span class="o">,</span> <span class="s">"a e c l"</span><span class="o">,</span> <span class="mf">0.0</span><span class="o">),</span>
<span class="o">(</span><span class="mi">10L</span><span class="o">,</span> <span class="s">"spark compile"</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">),</span>
<span class="o">(</span><span class="mi">11L</span><span class="o">,</span> <span class="s">"hadoop software"</span><span class="o">,</span> <span class="mf">0.0</span><span class="o">)</span>
<span class="o">)).</span><span class="py">toDF</span><span class="o">(</span><span class="s">"id"</span><span class="o">,</span> <span class="s">"text"</span><span class="o">,</span> <span class="s">"label"</span><span class="o">)</span>
<span class="c1">// Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr.</span>
<span class="k">val</span> <span class="nv">tokenizer</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">Tokenizer</span><span class="o">()</span>
<span class="o">.</span><span class="py">setInputCol</span><span class="o">(</span><span class="s">"text"</span><span class="o">)</span>
<span class="o">.</span><span class="py">setOutputCol</span><span class="o">(</span><span class="s">"words"</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">hashingTF</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">HashingTF</span><span class="o">()</span>
<span class="o">.</span><span class="py">setInputCol</span><span class="o">(</span><span class="nv">tokenizer</span><span class="o">.</span><span class="py">getOutputCol</span><span class="o">)</span>
<span class="o">.</span><span class="py">setOutputCol</span><span class="o">(</span><span class="s">"features"</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">lr</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">LogisticRegression</span><span class="o">()</span>
<span class="o">.</span><span class="py">setMaxIter</span><span class="o">(</span><span class="mi">10</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">pipeline</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">Pipeline</span><span class="o">()</span>
<span class="o">.</span><span class="py">setStages</span><span class="o">(</span><span class="nc">Array</span><span class="o">(</span><span class="n">tokenizer</span><span class="o">,</span> <span class="n">hashingTF</span><span class="o">,</span> <span class="n">lr</span><span class="o">))</span>
<span class="c1">// We use a ParamGridBuilder to construct a grid of parameters to search over.</span>
<span class="c1">// With 3 values for hashingTF.numFeatures and 2 values for lr.regParam,</span>
<span class="c1">// this grid will have 3 x 2 = 6 parameter settings for CrossValidator to choose from.</span>
<span class="k">val</span> <span class="nv">paramGrid</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">ParamGridBuilder</span><span class="o">()</span>
<span class="o">.</span><span class="py">addGrid</span><span class="o">(</span><span class="nv">hashingTF</span><span class="o">.</span><span class="py">numFeatures</span><span class="o">,</span> <span class="nc">Array</span><span class="o">(</span><span class="mi">10</span><span class="o">,</span> <span class="mi">100</span><span class="o">,</span> <span class="mi">1000</span><span class="o">))</span>
<span class="o">.</span><span class="py">addGrid</span><span class="o">(</span><span class="nv">lr</span><span class="o">.</span><span class="py">regParam</span><span class="o">,</span> <span class="nc">Array</span><span class="o">(</span><span class="mf">0.1</span><span class="o">,</span> <span class="mf">0.01</span><span class="o">))</span>
<span class="o">.</span><span class="py">build</span><span class="o">()</span>
<span class="c1">// We now treat the Pipeline as an Estimator, wrapping it in a CrossValidator instance.</span>
<span class="c1">// This will allow us to jointly choose parameters for all Pipeline stages.</span>
<span class="c1">// A CrossValidator requires an Estimator, a set of Estimator ParamMaps, and an Evaluator.</span>
<span class="c1">// Note that the evaluator here is a BinaryClassificationEvaluator and its default metric</span>
<span class="c1">// is areaUnderROC.</span>
<span class="k">val</span> <span class="nv">cv</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">CrossValidator</span><span class="o">()</span>
<span class="o">.</span><span class="py">setEstimator</span><span class="o">(</span><span class="n">pipeline</span><span class="o">)</span>
<span class="o">.</span><span class="py">setEvaluator</span><span class="o">(</span><span class="k">new</span> <span class="nc">BinaryClassificationEvaluator</span><span class="o">)</span>
<span class="o">.</span><span class="py">setEstimatorParamMaps</span><span class="o">(</span><span class="n">paramGrid</span><span class="o">)</span>
<span class="o">.</span><span class="py">setNumFolds</span><span class="o">(</span><span class="mi">2</span><span class="o">)</span> <span class="c1">// Use 3+ in practice</span>
<span class="o">.</span><span class="py">setParallelism</span><span class="o">(</span><span class="mi">2</span><span class="o">)</span> <span class="c1">// Evaluate up to 2 parameter settings in parallel</span>
<span class="c1">// Run cross-validation, and choose the best set of parameters.</span>
<span class="k">val</span> <span class="nv">cvModel</span> <span class="k">=</span> <span class="nv">cv</span><span class="o">.</span><span class="py">fit</span><span class="o">(</span><span class="n">training</span><span class="o">)</span>
<span class="c1">// Prepare test documents, which are unlabeled (id, text) tuples.</span>
<span class="k">val</span> <span class="nv">test</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">createDataFrame</span><span class="o">(</span><span class="nc">Seq</span><span class="o">(</span>
<span class="o">(</span><span class="mi">4L</span><span class="o">,</span> <span class="s">"spark i j k"</span><span class="o">),</span>
<span class="o">(</span><span class="mi">5L</span><span class="o">,</span> <span class="s">"l m n"</span><span class="o">),</span>
<span class="o">(</span><span class="mi">6L</span><span class="o">,</span> <span class="s">"mapreduce spark"</span><span class="o">),</span>
<span class="o">(</span><span class="mi">7L</span><span class="o">,</span> <span class="s">"apache hadoop"</span><span class="o">)</span>
<span class="o">)).</span><span class="py">toDF</span><span class="o">(</span><span class="s">"id"</span><span class="o">,</span> <span class="s">"text"</span><span class="o">)</span>
<span class="c1">// Make predictions on test documents. cvModel uses the best model found (lrModel).</span>
<span class="nv">cvModel</span><span class="o">.</span><span class="py">transform</span><span class="o">(</span><span class="n">test</span><span class="o">)</span>
<span class="o">.</span><span class="py">select</span><span class="o">(</span><span class="s">"id"</span><span class="o">,</span> <span class="s">"text"</span><span class="o">,</span> <span class="s">"probability"</span><span class="o">,</span> <span class="s">"prediction"</span><span class="o">)</span>
<span class="o">.</span><span class="py">collect</span><span class="o">()</span>
<span class="o">.</span><span class="py">foreach</span> <span class="o">{</span> <span class="k">case</span> <span class="nc">Row</span><span class="o">(</span><span class="n">id</span><span class="k">:</span> <span class="kt">Long</span><span class="o">,</span> <span class="n">text</span><span class="k">:</span> <span class="kt">String</span><span class="o">,</span> <span class="n">prob</span><span class="k">:</span> <span class="kt">Vector</span><span class="o">,</span> <span class="n">prediction</span><span class="k">:</span> <span class="kt">Double</span><span class="o">)</span> <span class="k">=&gt;</span>
<span class="nf">println</span><span class="o">(</span><span class="n">s</span><span class="s">"($id, $text) --&gt; prob=$prob, prediction=$prediction"</span><span class="o">)</span>
<span class="o">}</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/scala/org/apache/spark/examples/ml/ModelSelectionViaCrossValidationExample.scala" in the Spark repo.</small></div>
</div>
<div data-lang="java">
<p>Refer to the <a href="api/java/org/apache/spark/ml/tuning/CrossValidator.html"><code class="language-plaintext highlighter-rouge">CrossValidator</code> Java docs</a> for details on the API.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">import</span> <span class="nn">java.util.Arrays</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.Pipeline</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.PipelineStage</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.classification.LogisticRegression</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.evaluation.BinaryClassificationEvaluator</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.feature.HashingTF</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.feature.Tokenizer</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.param.ParamMap</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.tuning.CrossValidator</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.tuning.CrossValidatorModel</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.tuning.ParamGridBuilder</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.Dataset</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.Row</span><span class="o">;</span>
<span class="c1">// Prepare training documents, which are labeled.</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">training</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="na">createDataFrame</span><span class="o">(</span><span class="nc">Arrays</span><span class="o">.</span><span class="na">asList</span><span class="o">(</span>
<span class="k">new</span> <span class="nf">JavaLabeledDocument</span><span class="o">(</span><span class="mi">0L</span><span class="o">,</span> <span class="s">"a b c d e spark"</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">),</span>
<span class="k">new</span> <span class="nf">JavaLabeledDocument</span><span class="o">(</span><span class="mi">1L</span><span class="o">,</span> <span class="s">"b d"</span><span class="o">,</span> <span class="mf">0.0</span><span class="o">),</span>
<span class="k">new</span> <span class="nf">JavaLabeledDocument</span><span class="o">(</span><span class="mi">2L</span><span class="o">,</span><span class="s">"spark f g h"</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">),</span>
<span class="k">new</span> <span class="nf">JavaLabeledDocument</span><span class="o">(</span><span class="mi">3L</span><span class="o">,</span> <span class="s">"hadoop mapreduce"</span><span class="o">,</span> <span class="mf">0.0</span><span class="o">),</span>
<span class="k">new</span> <span class="nf">JavaLabeledDocument</span><span class="o">(</span><span class="mi">4L</span><span class="o">,</span> <span class="s">"b spark who"</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">),</span>
<span class="k">new</span> <span class="nf">JavaLabeledDocument</span><span class="o">(</span><span class="mi">5L</span><span class="o">,</span> <span class="s">"g d a y"</span><span class="o">,</span> <span class="mf">0.0</span><span class="o">),</span>
<span class="k">new</span> <span class="nf">JavaLabeledDocument</span><span class="o">(</span><span class="mi">6L</span><span class="o">,</span> <span class="s">"spark fly"</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">),</span>
<span class="k">new</span> <span class="nf">JavaLabeledDocument</span><span class="o">(</span><span class="mi">7L</span><span class="o">,</span> <span class="s">"was mapreduce"</span><span class="o">,</span> <span class="mf">0.0</span><span class="o">),</span>
<span class="k">new</span> <span class="nf">JavaLabeledDocument</span><span class="o">(</span><span class="mi">8L</span><span class="o">,</span> <span class="s">"e spark program"</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">),</span>
<span class="k">new</span> <span class="nf">JavaLabeledDocument</span><span class="o">(</span><span class="mi">9L</span><span class="o">,</span> <span class="s">"a e c l"</span><span class="o">,</span> <span class="mf">0.0</span><span class="o">),</span>
<span class="k">new</span> <span class="nf">JavaLabeledDocument</span><span class="o">(</span><span class="mi">10L</span><span class="o">,</span> <span class="s">"spark compile"</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">),</span>
<span class="k">new</span> <span class="nf">JavaLabeledDocument</span><span class="o">(</span><span class="mi">11L</span><span class="o">,</span> <span class="s">"hadoop software"</span><span class="o">,</span> <span class="mf">0.0</span><span class="o">)</span>
<span class="o">),</span> <span class="nc">JavaLabeledDocument</span><span class="o">.</span><span class="na">class</span><span class="o">);</span>
<span class="c1">// Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr.</span>
<span class="nc">Tokenizer</span> <span class="n">tokenizer</span> <span class="o">=</span> <span class="k">new</span> <span class="nc">Tokenizer</span><span class="o">()</span>
<span class="o">.</span><span class="na">setInputCol</span><span class="o">(</span><span class="s">"text"</span><span class="o">)</span>
<span class="o">.</span><span class="na">setOutputCol</span><span class="o">(</span><span class="s">"words"</span><span class="o">);</span>
<span class="nc">HashingTF</span> <span class="n">hashingTF</span> <span class="o">=</span> <span class="k">new</span> <span class="nc">HashingTF</span><span class="o">()</span>
<span class="o">.</span><span class="na">setNumFeatures</span><span class="o">(</span><span class="mi">1000</span><span class="o">)</span>
<span class="o">.</span><span class="na">setInputCol</span><span class="o">(</span><span class="n">tokenizer</span><span class="o">.</span><span class="na">getOutputCol</span><span class="o">())</span>
<span class="o">.</span><span class="na">setOutputCol</span><span class="o">(</span><span class="s">"features"</span><span class="o">);</span>
<span class="nc">LogisticRegression</span> <span class="n">lr</span> <span class="o">=</span> <span class="k">new</span> <span class="nc">LogisticRegression</span><span class="o">()</span>
<span class="o">.</span><span class="na">setMaxIter</span><span class="o">(</span><span class="mi">10</span><span class="o">)</span>
<span class="o">.</span><span class="na">setRegParam</span><span class="o">(</span><span class="mf">0.01</span><span class="o">);</span>
<span class="nc">Pipeline</span> <span class="n">pipeline</span> <span class="o">=</span> <span class="k">new</span> <span class="nc">Pipeline</span><span class="o">()</span>
<span class="o">.</span><span class="na">setStages</span><span class="o">(</span><span class="k">new</span> <span class="nc">PipelineStage</span><span class="o">[]</span> <span class="o">{</span><span class="n">tokenizer</span><span class="o">,</span> <span class="n">hashingTF</span><span class="o">,</span> <span class="n">lr</span><span class="o">});</span>
<span class="c1">// We use a ParamGridBuilder to construct a grid of parameters to search over.</span>
<span class="c1">// With 3 values for hashingTF.numFeatures and 2 values for lr.regParam,</span>
<span class="c1">// this grid will have 3 x 2 = 6 parameter settings for CrossValidator to choose from.</span>
<span class="nc">ParamMap</span><span class="o">[]</span> <span class="n">paramGrid</span> <span class="o">=</span> <span class="k">new</span> <span class="nc">ParamGridBuilder</span><span class="o">()</span>
<span class="o">.</span><span class="na">addGrid</span><span class="o">(</span><span class="n">hashingTF</span><span class="o">.</span><span class="na">numFeatures</span><span class="o">(),</span> <span class="k">new</span> <span class="kt">int</span><span class="o">[]</span> <span class="o">{</span><span class="mi">10</span><span class="o">,</span> <span class="mi">100</span><span class="o">,</span> <span class="mi">1000</span><span class="o">})</span>
<span class="o">.</span><span class="na">addGrid</span><span class="o">(</span><span class="n">lr</span><span class="o">.</span><span class="na">regParam</span><span class="o">(),</span> <span class="k">new</span> <span class="kt">double</span><span class="o">[]</span> <span class="o">{</span><span class="mf">0.1</span><span class="o">,</span> <span class="mf">0.01</span><span class="o">})</span>
<span class="o">.</span><span class="na">build</span><span class="o">();</span>
<span class="c1">// We now treat the Pipeline as an Estimator, wrapping it in a CrossValidator instance.</span>
<span class="c1">// This will allow us to jointly choose parameters for all Pipeline stages.</span>
<span class="c1">// A CrossValidator requires an Estimator, a set of Estimator ParamMaps, and an Evaluator.</span>
<span class="c1">// Note that the evaluator here is a BinaryClassificationEvaluator and its default metric</span>
<span class="c1">// is areaUnderROC.</span>
<span class="nc">CrossValidator</span> <span class="n">cv</span> <span class="o">=</span> <span class="k">new</span> <span class="nc">CrossValidator</span><span class="o">()</span>
<span class="o">.</span><span class="na">setEstimator</span><span class="o">(</span><span class="n">pipeline</span><span class="o">)</span>
<span class="o">.</span><span class="na">setEvaluator</span><span class="o">(</span><span class="k">new</span> <span class="nc">BinaryClassificationEvaluator</span><span class="o">())</span>
<span class="o">.</span><span class="na">setEstimatorParamMaps</span><span class="o">(</span><span class="n">paramGrid</span><span class="o">)</span>
<span class="o">.</span><span class="na">setNumFolds</span><span class="o">(</span><span class="mi">2</span><span class="o">)</span> <span class="c1">// Use 3+ in practice</span>
<span class="o">.</span><span class="na">setParallelism</span><span class="o">(</span><span class="mi">2</span><span class="o">);</span> <span class="c1">// Evaluate up to 2 parameter settings in parallel</span>
<span class="c1">// Run cross-validation, and choose the best set of parameters.</span>
<span class="nc">CrossValidatorModel</span> <span class="n">cvModel</span> <span class="o">=</span> <span class="n">cv</span><span class="o">.</span><span class="na">fit</span><span class="o">(</span><span class="n">training</span><span class="o">);</span>
<span class="c1">// Prepare test documents, which are unlabeled.</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">test</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="na">createDataFrame</span><span class="o">(</span><span class="nc">Arrays</span><span class="o">.</span><span class="na">asList</span><span class="o">(</span>
<span class="k">new</span> <span class="nf">JavaDocument</span><span class="o">(</span><span class="mi">4L</span><span class="o">,</span> <span class="s">"spark i j k"</span><span class="o">),</span>
<span class="k">new</span> <span class="nf">JavaDocument</span><span class="o">(</span><span class="mi">5L</span><span class="o">,</span> <span class="s">"l m n"</span><span class="o">),</span>
<span class="k">new</span> <span class="nf">JavaDocument</span><span class="o">(</span><span class="mi">6L</span><span class="o">,</span> <span class="s">"mapreduce spark"</span><span class="o">),</span>
<span class="k">new</span> <span class="nf">JavaDocument</span><span class="o">(</span><span class="mi">7L</span><span class="o">,</span> <span class="s">"apache hadoop"</span><span class="o">)</span>
<span class="o">),</span> <span class="nc">JavaDocument</span><span class="o">.</span><span class="na">class</span><span class="o">);</span>
<span class="c1">// Make predictions on test documents. cvModel uses the best model found (lrModel).</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">predictions</span> <span class="o">=</span> <span class="n">cvModel</span><span class="o">.</span><span class="na">transform</span><span class="o">(</span><span class="n">test</span><span class="o">);</span>
<span class="k">for</span> <span class="o">(</span><span class="nc">Row</span> <span class="n">r</span> <span class="o">:</span> <span class="n">predictions</span><span class="o">.</span><span class="na">select</span><span class="o">(</span><span class="s">"id"</span><span class="o">,</span> <span class="s">"text"</span><span class="o">,</span> <span class="s">"probability"</span><span class="o">,</span> <span class="s">"prediction"</span><span class="o">).</span><span class="na">collectAsList</span><span class="o">())</span> <span class="o">{</span>
<span class="nc">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="s">"("</span> <span class="o">+</span> <span class="n">r</span><span class="o">.</span><span class="na">get</span><span class="o">(</span><span class="mi">0</span><span class="o">)</span> <span class="o">+</span> <span class="s">", "</span> <span class="o">+</span> <span class="n">r</span><span class="o">.</span><span class="na">get</span><span class="o">(</span><span class="mi">1</span><span class="o">)</span> <span class="o">+</span> <span class="s">") --&gt; prob="</span> <span class="o">+</span> <span class="n">r</span><span class="o">.</span><span class="na">get</span><span class="o">(</span><span class="mi">2</span><span class="o">)</span>
<span class="o">+</span> <span class="s">", prediction="</span> <span class="o">+</span> <span class="n">r</span><span class="o">.</span><span class="na">get</span><span class="o">(</span><span class="mi">3</span><span class="o">));</span>
<span class="o">}</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/java/org/apache/spark/examples/ml/JavaModelSelectionViaCrossValidationExample.java" in the Spark repo.</small></div>
</div>
</div>
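<p>Once fitted, the resulting <code class="language-plaintext highlighter-rouge">CrossValidatorModel</code> exposes the averaged metrics and the model re-fitted on the entire dataset. A brief Python sketch, assuming the <code class="language-plaintext highlighter-rouge">cvModel</code> from the example above:</p>
<div class="highlight"><pre class="codehilite"><code># Average evaluation metric for each ParamMap, in the order of estimatorParamMaps.
print(cvModel.avgMetrics)

# The Model produced by re-fitting the Estimator with the best ParamMap on the full dataset.
bestPipelineModel = cvModel.bestModel</code></pre></div>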
<h1 id="train-validation-split">Train-Validation Split</h1>
<p>In addition to <code class="language-plaintext highlighter-rouge">CrossValidator</code>, Spark also offers <code class="language-plaintext highlighter-rouge">TrainValidationSplit</code> for hyperparameter tuning.
<code class="language-plaintext highlighter-rouge">TrainValidationSplit</code> evaluates each combination of parameters only once, as opposed to k times in
the case of <code class="language-plaintext highlighter-rouge">CrossValidator</code>. It is therefore less expensive,
but it will not produce results that are as reliable when the training dataset is not sufficiently large.</p>
<p>Unlike <code class="language-plaintext highlighter-rouge">CrossValidator</code>, <code class="language-plaintext highlighter-rouge">TrainValidationSplit</code> creates a single (training, test) dataset pair.
It splits the dataset into these two parts using the <code class="language-plaintext highlighter-rouge">trainRatio</code> parameter. For example, with <code class="language-plaintext highlighter-rouge">$trainRatio=0.75$</code>,
<code class="language-plaintext highlighter-rouge">TrainValidationSplit</code> will generate a training and test dataset pair where 75% of the data is used for training and 25% for validation.</p>
<p>Like <code class="language-plaintext highlighter-rouge">CrossValidator</code>, <code class="language-plaintext highlighter-rouge">TrainValidationSplit</code> finally fits the <code class="language-plaintext highlighter-rouge">Estimator</code> using the best <code class="language-plaintext highlighter-rouge">ParamMap</code> and the entire dataset.</p>
<p><strong>Examples: model selection via train validation split</strong></p>
<div class="codetabs">
<div data-lang="python">
<p>Refer to the <a href="api/python/reference/api/pyspark.ml.tuning.TrainValidationSplit.html"><code class="language-plaintext highlighter-rouge">TrainValidationSplit</code> Python docs</a> for more details on the API.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">from</span> <span class="nn">pyspark.ml.evaluation</span> <span class="kn">import</span> <span class="n">RegressionEvaluator</span>
<span class="kn">from</span> <span class="nn">pyspark.ml.regression</span> <span class="kn">import</span> <span class="n">LinearRegression</span>
<span class="kn">from</span> <span class="nn">pyspark.ml.tuning</span> <span class="kn">import</span> <span class="n">ParamGridBuilder</span><span class="p">,</span> <span class="n">TrainValidationSplit</span>
<span class="c1"># Prepare training and test data.
</span><span class="n">data</span> <span class="o">=</span> <span class="n">spark</span><span class="p">.</span><span class="n">read</span><span class="p">.</span><span class="nb">format</span><span class="p">(</span><span class="s">"libsvm"</span><span class="p">)</span>\
<span class="p">.</span><span class="n">load</span><span class="p">(</span><span class="s">"data/mllib/sample_linear_regression_data.txt"</span><span class="p">)</span>
<span class="n">train</span><span class="p">,</span> <span class="n">test</span> <span class="o">=</span> <span class="n">data</span><span class="p">.</span><span class="n">randomSplit</span><span class="p">([</span><span class="mf">0.9</span><span class="p">,</span> <span class="mf">0.1</span><span class="p">],</span> <span class="n">seed</span><span class="o">=</span><span class="mi">12345</span><span class="p">)</span>
<span class="n">lr</span> <span class="o">=</span> <span class="n">LinearRegression</span><span class="p">(</span><span class="n">maxIter</span><span class="o">=</span><span class="mi">10</span><span class="p">)</span>
<span class="c1"># We use a ParamGridBuilder to construct a grid of parameters to search over.
# TrainValidationSplit will try all combinations of values and determine best model using
# the evaluator.
</span><span class="n">paramGrid</span> <span class="o">=</span> <span class="n">ParamGridBuilder</span><span class="p">()</span>\
<span class="p">.</span><span class="n">addGrid</span><span class="p">(</span><span class="n">lr</span><span class="p">.</span><span class="n">regParam</span><span class="p">,</span> <span class="p">[</span><span class="mf">0.1</span><span class="p">,</span> <span class="mf">0.01</span><span class="p">])</span> \
<span class="p">.</span><span class="n">addGrid</span><span class="p">(</span><span class="n">lr</span><span class="p">.</span><span class="n">fitIntercept</span><span class="p">,</span> <span class="p">[</span><span class="bp">False</span><span class="p">,</span> <span class="bp">True</span><span class="p">])</span>\
<span class="p">.</span><span class="n">addGrid</span><span class="p">(</span><span class="n">lr</span><span class="p">.</span><span class="n">elasticNetParam</span><span class="p">,</span> <span class="p">[</span><span class="mf">0.0</span><span class="p">,</span> <span class="mf">0.5</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">])</span>\
<span class="p">.</span><span class="n">build</span><span class="p">()</span>
<span class="c1"># In this case the estimator is simply the linear regression.
# A TrainValidationSplit requires an Estimator, a set of Estimator ParamMaps, and an Evaluator.
</span><span class="n">tvs</span> <span class="o">=</span> <span class="n">TrainValidationSplit</span><span class="p">(</span><span class="n">estimator</span><span class="o">=</span><span class="n">lr</span><span class="p">,</span>
<span class="n">estimatorParamMaps</span><span class="o">=</span><span class="n">paramGrid</span><span class="p">,</span>
<span class="n">evaluator</span><span class="o">=</span><span class="n">RegressionEvaluator</span><span class="p">(),</span>
<span class="c1"># 80% of the data will be used for training, 20% for validation.
</span> <span class="n">trainRatio</span><span class="o">=</span><span class="mf">0.8</span><span class="p">)</span>
<span class="c1"># Run TrainValidationSplit, and choose the best set of parameters.
</span><span class="n">model</span> <span class="o">=</span> <span class="n">tvs</span><span class="p">.</span><span class="n">fit</span><span class="p">(</span><span class="n">train</span><span class="p">)</span>
<span class="c1"># Make predictions on test data. model is the model with combination of parameters
# that performed best.
</span><span class="n">model</span><span class="p">.</span><span class="n">transform</span><span class="p">(</span><span class="n">test</span><span class="p">)</span>\
<span class="p">.</span><span class="n">select</span><span class="p">(</span><span class="s">"features"</span><span class="p">,</span> <span class="s">"label"</span><span class="p">,</span> <span class="s">"prediction"</span><span class="p">)</span>\
<span class="p">.</span><span class="n">show</span><span class="p">()</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/python/ml/train_validation_split.py" in the Spark repo.</small></div>
</div>
<div data-lang="scala">
<p>Refer to the <a href="api/scala/org/apache/spark/ml/tuning/TrainValidationSplit.html"><code class="language-plaintext highlighter-rouge">TrainValidationSplit</code> Scala docs</a> for details on the API.</p>
<div class="highlight"><pre class="codehilite"><code><span class="k">import</span> <span class="nn">org.apache.spark.ml.evaluation.RegressionEvaluator</span>
<span class="k">import</span> <span class="nn">org.apache.spark.ml.regression.LinearRegression</span>
<span class="k">import</span> <span class="nn">org.apache.spark.ml.tuning.</span><span class="o">{</span><span class="nc">ParamGridBuilder</span><span class="o">,</span> <span class="nc">TrainValidationSplit</span><span class="o">}</span>
<span class="c1">// Prepare training and test data.</span>
<span class="k">val</span> <span class="nv">data</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">read</span><span class="o">.</span><span class="py">format</span><span class="o">(</span><span class="s">"libsvm"</span><span class="o">).</span><span class="py">load</span><span class="o">(</span><span class="s">"data/mllib/sample_linear_regression_data.txt"</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">Array</span><span class="o">(</span><span class="n">training</span><span class="o">,</span> <span class="n">test</span><span class="o">)</span> <span class="k">=</span> <span class="nv">data</span><span class="o">.</span><span class="py">randomSplit</span><span class="o">(</span><span class="nc">Array</span><span class="o">(</span><span class="mf">0.9</span><span class="o">,</span> <span class="mf">0.1</span><span class="o">),</span> <span class="n">seed</span> <span class="k">=</span> <span class="mi">12345</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">lr</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">LinearRegression</span><span class="o">()</span>
<span class="o">.</span><span class="py">setMaxIter</span><span class="o">(</span><span class="mi">10</span><span class="o">)</span>
<span class="c1">// We use a ParamGridBuilder to construct a grid of parameters to search over.</span>
<span class="c1">// TrainValidationSplit will try all combinations of values and determine best model using</span>
<span class="c1">// the evaluator.</span>
<span class="k">val</span> <span class="nv">paramGrid</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">ParamGridBuilder</span><span class="o">()</span>
<span class="o">.</span><span class="py">addGrid</span><span class="o">(</span><span class="nv">lr</span><span class="o">.</span><span class="py">regParam</span><span class="o">,</span> <span class="nc">Array</span><span class="o">(</span><span class="mf">0.1</span><span class="o">,</span> <span class="mf">0.01</span><span class="o">))</span>
<span class="o">.</span><span class="py">addGrid</span><span class="o">(</span><span class="nv">lr</span><span class="o">.</span><span class="py">fitIntercept</span><span class="o">)</span>
<span class="o">.</span><span class="py">addGrid</span><span class="o">(</span><span class="nv">lr</span><span class="o">.</span><span class="py">elasticNetParam</span><span class="o">,</span> <span class="nc">Array</span><span class="o">(</span><span class="mf">0.0</span><span class="o">,</span> <span class="mf">0.5</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">))</span>
<span class="o">.</span><span class="py">build</span><span class="o">()</span>
<span class="c1">// In this case the estimator is simply the linear regression.</span>
<span class="c1">// A TrainValidationSplit requires an Estimator, a set of Estimator ParamMaps, and an Evaluator.</span>
<span class="k">val</span> <span class="nv">trainValidationSplit</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">TrainValidationSplit</span><span class="o">()</span>
<span class="o">.</span><span class="py">setEstimator</span><span class="o">(</span><span class="n">lr</span><span class="o">)</span>
<span class="o">.</span><span class="py">setEvaluator</span><span class="o">(</span><span class="k">new</span> <span class="nc">RegressionEvaluator</span><span class="o">)</span>
<span class="o">.</span><span class="py">setEstimatorParamMaps</span><span class="o">(</span><span class="n">paramGrid</span><span class="o">)</span>
<span class="c1">// 80% of the data will be used for training and the remaining 20% for validation.</span>
<span class="o">.</span><span class="py">setTrainRatio</span><span class="o">(</span><span class="mf">0.8</span><span class="o">)</span>
<span class="c1">// Evaluate up to 2 parameter settings in parallel</span>
<span class="o">.</span><span class="py">setParallelism</span><span class="o">(</span><span class="mi">2</span><span class="o">)</span>
<span class="c1">// Run train validation split, and choose the best set of parameters.</span>
<span class="k">val</span> <span class="nv">model</span> <span class="k">=</span> <span class="nv">trainValidationSplit</span><span class="o">.</span><span class="py">fit</span><span class="o">(</span><span class="n">training</span><span class="o">)</span>
<span class="c1">// Make predictions on test data. model is the model with combination of parameters</span>
<span class="c1">// that performed best.</span>
<span class="nv">model</span><span class="o">.</span><span class="py">transform</span><span class="o">(</span><span class="n">test</span><span class="o">)</span>
<span class="o">.</span><span class="py">select</span><span class="o">(</span><span class="s">"features"</span><span class="o">,</span> <span class="s">"label"</span><span class="o">,</span> <span class="s">"prediction"</span><span class="o">)</span>
<span class="o">.</span><span class="py">show</span><span class="o">()</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/scala/org/apache/spark/examples/ml/ModelSelectionViaTrainValidationSplitExample.scala" in the Spark repo.</small></div>
</div>
<div data-lang="java">
<p>Refer to the <a href="api/java/org/apache/spark/ml/tuning/TrainValidationSplit.html"><code class="language-plaintext highlighter-rouge">TrainValidationSplit</code> Java docs</a> for details on the API.</p>
<div class="highlight"><pre class="codehilite"><code><span class="kn">import</span> <span class="nn">org.apache.spark.ml.evaluation.RegressionEvaluator</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.param.ParamMap</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.regression.LinearRegression</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.tuning.ParamGridBuilder</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.tuning.TrainValidationSplit</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.ml.tuning.TrainValidationSplitModel</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.Dataset</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.Row</span><span class="o">;</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">data</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="na">read</span><span class="o">().</span><span class="na">format</span><span class="o">(</span><span class="s">"libsvm"</span><span class="o">)</span>
<span class="o">.</span><span class="na">load</span><span class="o">(</span><span class="s">"data/mllib/sample_linear_regression_data.txt"</span><span class="o">);</span>
<span class="c1">// Prepare training and test data.</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;[]</span> <span class="n">splits</span> <span class="o">=</span> <span class="n">data</span><span class="o">.</span><span class="na">randomSplit</span><span class="o">(</span><span class="k">new</span> <span class="kt">double</span><span class="o">[]</span> <span class="o">{</span><span class="mf">0.9</span><span class="o">,</span> <span class="mf">0.1</span><span class="o">},</span> <span class="mi">12345</span><span class="o">);</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">training</span> <span class="o">=</span> <span class="n">splits</span><span class="o">[</span><span class="mi">0</span><span class="o">];</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">test</span> <span class="o">=</span> <span class="n">splits</span><span class="o">[</span><span class="mi">1</span><span class="o">];</span>
<span class="nc">LinearRegression</span> <span class="n">lr</span> <span class="o">=</span> <span class="k">new</span> <span class="nc">LinearRegression</span><span class="o">();</span>
<span class="c1">// We use a ParamGridBuilder to construct a grid of parameters to search over.</span>
<span class="c1">// TrainValidationSplit will try all combinations of values and determine best model using</span>
<span class="c1">// the evaluator.</span>
<span class="nc">ParamMap</span><span class="o">[]</span> <span class="n">paramGrid</span> <span class="o">=</span> <span class="k">new</span> <span class="nc">ParamGridBuilder</span><span class="o">()</span>
<span class="o">.</span><span class="na">addGrid</span><span class="o">(</span><span class="n">lr</span><span class="o">.</span><span class="na">regParam</span><span class="o">(),</span> <span class="k">new</span> <span class="kt">double</span><span class="o">[]</span> <span class="o">{</span><span class="mf">0.1</span><span class="o">,</span> <span class="mf">0.01</span><span class="o">})</span>
<span class="o">.</span><span class="na">addGrid</span><span class="o">(</span><span class="n">lr</span><span class="o">.</span><span class="na">fitIntercept</span><span class="o">())</span>
<span class="o">.</span><span class="na">addGrid</span><span class="o">(</span><span class="n">lr</span><span class="o">.</span><span class="na">elasticNetParam</span><span class="o">(),</span> <span class="k">new</span> <span class="kt">double</span><span class="o">[]</span> <span class="o">{</span><span class="mf">0.0</span><span class="o">,</span> <span class="mf">0.5</span><span class="o">,</span> <span class="mf">1.0</span><span class="o">})</span>
<span class="o">.</span><span class="na">build</span><span class="o">();</span>
<span class="c1">// In this case the estimator is simply the linear regression.</span>
<span class="c1">// A TrainValidationSplit requires an Estimator, a set of Estimator ParamMaps, and an Evaluator.</span>
<span class="nc">TrainValidationSplit</span> <span class="n">trainValidationSplit</span> <span class="o">=</span> <span class="k">new</span> <span class="nc">TrainValidationSplit</span><span class="o">()</span>
<span class="o">.</span><span class="na">setEstimator</span><span class="o">(</span><span class="n">lr</span><span class="o">)</span>
<span class="o">.</span><span class="na">setEvaluator</span><span class="o">(</span><span class="k">new</span> <span class="nc">RegressionEvaluator</span><span class="o">())</span>
<span class="o">.</span><span class="na">setEstimatorParamMaps</span><span class="o">(</span><span class="n">paramGrid</span><span class="o">)</span>
<span class="o">.</span><span class="na">setTrainRatio</span><span class="o">(</span><span class="mf">0.8</span><span class="o">)</span> <span class="c1">// 80% for training and the remaining 20% for validation</span>
<span class="o">.</span><span class="na">setParallelism</span><span class="o">(</span><span class="mi">2</span><span class="o">);</span> <span class="c1">// Evaluate up to 2 parameter settings in parallel</span>
<span class="c1">// Run train validation split, and choose the best set of parameters.</span>
<span class="nc">TrainValidationSplitModel</span> <span class="n">model</span> <span class="o">=</span> <span class="n">trainValidationSplit</span><span class="o">.</span><span class="na">fit</span><span class="o">(</span><span class="n">training</span><span class="o">);</span>
<span class="c1">// Make predictions on test data. model is the model with combination of parameters</span>
<span class="c1">// that performed best.</span>
<span class="n">model</span><span class="o">.</span><span class="na">transform</span><span class="o">(</span><span class="n">test</span><span class="o">)</span>
<span class="o">.</span><span class="na">select</span><span class="o">(</span><span class="s">"features"</span><span class="o">,</span> <span class="s">"label"</span><span class="o">,</span> <span class="s">"prediction"</span><span class="o">)</span>
<span class="o">.</span><span class="na">show</span><span class="o">();</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/java/org/apache/spark/examples/ml/JavaModelSelectionViaTrainValidationSplitExample.java" in the Spark repo.</small></div>
</div>
</div>
</div>
<!-- /container -->
</div>
<script src="js/vendor/jquery-3.5.1.min.js"></script>
<script src="js/vendor/bootstrap.bundle.min.js"></script>
<script src="js/vendor/anchor.min.js"></script>
<script src="js/main.js"></script>
<script type="text/javascript" src="js/vendor/docsearch.min.js"></script>
<script type="text/javascript">
// DocSearch is entirely free and automated. DocSearch is built in two parts:
// 1. a crawler which we run on our own infrastructure every 24 hours. It follows every link
// in your website and extracts content from every page it traverses. It then pushes this
// content to an Algolia index.
// 2. a JavaScript snippet to be inserted in your website that will bind this Algolia index
// to your search input and display its results in a dropdown UI. If you want to find more
// details on how DocSearch works, check the DocSearch docs.
docsearch({
apiKey: 'd62f962a82bc9abb53471cb7b89da35e',
appId: 'RAI69RXRSK',
indexName: 'apache_spark',
inputSelector: '#docsearch-input',
enhancedSearchInput: true,
algoliaOptions: {
'facetFilters': ["version:3.5.0"]
},
debug: false // Set debug to true if you want to inspect the dropdown
});
</script>
<!-- MathJax Section -->
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
TeX: { equationNumbers: { autoNumber: "AMS" } }
});
</script>
<script>
// Note that we load MathJax this way to work with local file (file://), HTTP and HTTPS.
// We could use "//cdn.mathjax...", but that won't support "file://".
(function(d, script) {
script = d.createElement('script');
script.type = 'text/javascript';
script.async = true;
script.onload = function(){
MathJax.Hub.Config({
tex2jax: {
inlineMath: [ ["$", "$"], ["\\\\(","\\\\)"] ],
displayMath: [ ["$$","$$"], ["\\[", "\\]"] ],
processEscapes: true,
skipTags: ['script', 'noscript', 'style', 'textarea', 'pre']
}
});
};
script.src = ('https:' == document.location.protocol ? 'https://' : 'http://') +
'cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js' +
'?config=TeX-AMS-MML_HTMLorMML';
d.getElementsByTagName('head')[0].appendChild(script);
}(document));
</script>
</body>
</html>