| <!DOCTYPE html> |
<!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7" lang="en"> <![endif]-->
<!--[if IE 7]> <html class="no-js lt-ie9 lt-ie8" lang="en"> <![endif]-->
<!--[if IE 8]> <html class="no-js lt-ie9" lang="en"> <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en"> <!--<![endif]-->
| <head> |
| <meta charset="utf-8"> |
| <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"> |
| <title>Basic Statistics - MLlib - Spark 1.2.2 Documentation</title> |
<meta name="description" content="Basic statistics with MLlib in Spark 1.2.2: summary statistics, correlations, stratified sampling, hypothesis testing, and random data generation.">
| |
| |
| |
| <link rel="stylesheet" href="css/bootstrap.min.css"> |
| <style> |
| body { |
| padding-top: 60px; |
| padding-bottom: 40px; |
| } |
| </style> |
<meta name="viewport" content="width=device-width, initial-scale=1">
| <link rel="stylesheet" href="css/bootstrap-responsive.min.css"> |
| <link rel="stylesheet" href="css/main.css"> |
| |
| <script src="js/vendor/modernizr-2.6.1-respond-1.1.0.min.js"></script> |
| |
| <link rel="stylesheet" href="css/pygments-default.css"> |
| |
| |
<!-- Google Analytics: classic asynchronous ga.js snippet.
     Commands pushed onto _gaq before the library loads are replayed
     once ga.js arrives. -->
<script>
  var _gaq = _gaq || [];
  _gaq.push(['_setAccount', 'UA-32518208-2']);
  _gaq.push(['_trackPageview']);

  (function() {
    // Inject the ga.js loader asynchronously, matching the page's
    // protocol so the snippet works on both http and https pages.
    var ga = document.createElement('script');
    ga.type = 'text/javascript';
    ga.async = true;
    ga.src = ('https:' === document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
    // Insert before the first existing script tag without blocking parsing.
    var s = document.getElementsByTagName('script')[0];
    s.parentNode.insertBefore(ga, s);
  })();
</script>
| |
| |
| </head> |
| <body> |
| <!--[if lt IE 7]> |
| <p class="chromeframe">You are using an outdated browser. <a href="http://browsehappy.com/">Upgrade your browser today</a> or <a href="http://www.google.com/chromeframe/?redirect=true">install Google Chrome Frame</a> to better experience this site.</p> |
| <![endif]--> |
| |
| <!-- This code is taken from http://twitter.github.com/bootstrap/examples/hero.html --> |
| |
| <div class="navbar navbar-fixed-top" id="topbar"> |
| <div class="navbar-inner"> |
| <div class="container"> |
| <div class="brand"><a href="index.html"> |
| <img src="img/spark-logo-hd.png" style="height:50px;"/></a><span class="version">1.2.2</span> |
| </div> |
| <ul class="nav"> |
| <!--TODO(andyk): Add class="active" attribute to li some how.--> |
| <li><a href="index.html">Overview</a></li> |
| |
| <li class="dropdown"> |
| <a href="#" class="dropdown-toggle" data-toggle="dropdown">Programming Guides<b class="caret"></b></a> |
| <ul class="dropdown-menu"> |
| <li><a href="quick-start.html">Quick Start</a></li> |
| <li><a href="programming-guide.html">Spark Programming Guide</a></li> |
| <li class="divider"></li> |
| <li><a href="streaming-programming-guide.html">Spark Streaming</a></li> |
| <li><a href="sql-programming-guide.html">Spark SQL</a></li> |
| <li><a href="mllib-guide.html">MLlib (Machine Learning)</a></li> |
| <li><a href="graphx-programming-guide.html">GraphX (Graph Processing)</a></li> |
| <li><a href="bagel-programming-guide.html">Bagel (Pregel on Spark)</a></li> |
| </ul> |
| </li> |
| |
| <li class="dropdown"> |
| <a href="#" class="dropdown-toggle" data-toggle="dropdown">API Docs<b class="caret"></b></a> |
| <ul class="dropdown-menu"> |
| <li><a href="api/scala/index.html#org.apache.spark.package">Scala</a></li> |
| <li><a href="api/java/index.html">Java</a></li> |
| <li><a href="api/python/index.html">Python</a></li> |
| </ul> |
| </li> |
| |
| <li class="dropdown"> |
| <a href="#" class="dropdown-toggle" data-toggle="dropdown">Deploying<b class="caret"></b></a> |
| <ul class="dropdown-menu"> |
| <li><a href="cluster-overview.html">Overview</a></li> |
| <li><a href="submitting-applications.html">Submitting Applications</a></li> |
| <li class="divider"></li> |
| <li><a href="spark-standalone.html">Spark Standalone</a></li> |
| <li><a href="running-on-mesos.html">Mesos</a></li> |
| <li><a href="running-on-yarn.html">YARN</a></li> |
| <li class="divider"></li> |
| <li><a href="ec2-scripts.html">Amazon EC2</a></li> |
| </ul> |
| </li> |
| |
| <li class="dropdown"> |
| <a href="api.html" class="dropdown-toggle" data-toggle="dropdown">More<b class="caret"></b></a> |
| <ul class="dropdown-menu"> |
| <li><a href="configuration.html">Configuration</a></li> |
| <li><a href="monitoring.html">Monitoring</a></li> |
| <li><a href="tuning.html">Tuning Guide</a></li> |
| <li><a href="job-scheduling.html">Job Scheduling</a></li> |
| <li><a href="security.html">Security</a></li> |
| <li><a href="hardware-provisioning.html">Hardware Provisioning</a></li> |
| <li><a href="hadoop-third-party-distributions.html">3<sup>rd</sup>-Party Hadoop Distros</a></li> |
| <li class="divider"></li> |
| <li><a href="building-spark.html">Building Spark</a></li> |
| <li><a href="https://cwiki.apache.org/confluence/display/SPARK/Contributing+to+Spark">Contributing to Spark</a></li> |
| <li><a href="https://cwiki.apache.org/confluence/display/SPARK/Supplemental+Spark+Projects">Supplemental Projects</a></li> |
| </ul> |
| </li> |
| </ul> |
| <!--<p class="navbar-text pull-right"><span class="version-text">v1.2.2</span></p>--> |
| </div> |
| </div> |
| </div> |
| |
| <div class="container" id="content"> |
| |
| <h1 class="title"><a href="mllib-guide.html">MLlib</a> - Basic Statistics</h1> |
| |
| |
| <ul id="markdown-toc"> |
| <li><a href="#summary-statistics">Summary statistics</a></li> |
| <li><a href="#correlations">Correlations</a></li> |
| <li><a href="#stratified-sampling">Stratified sampling</a></li> |
| <li><a href="#hypothesis-testing">Hypothesis testing</a></li> |
| <li><a href="#random-data-generation">Random data generation</a></li> |
| </ul> |
| |
| <p><code>\[ |
| \newcommand{\R}{\mathbb{R}} |
| \newcommand{\E}{\mathbb{E}} |
| \newcommand{\x}{\mathbf{x}} |
| \newcommand{\y}{\mathbf{y}} |
| \newcommand{\wv}{\mathbf{w}} |
| \newcommand{\av}{\mathbf{\alpha}} |
| \newcommand{\bv}{\mathbf{b}} |
| \newcommand{\N}{\mathbb{N}} |
| \newcommand{\id}{\mathbf{I}} |
| \newcommand{\ind}{\mathbf{1}} |
| \newcommand{\0}{\mathbf{0}} |
| \newcommand{\unit}{\mathbf{e}} |
| \newcommand{\one}{\mathbf{1}} |
| \newcommand{\zero}{\mathbf{0}} |
| \]</code></p> |
| |
| <h2 id="summary-statistics">Summary statistics</h2> |
| |
| <p>We provide column summary statistics for <code>RDD[Vector]</code> through the function <code>colStats</code> |
| available in <code>Statistics</code>.</p> |
| |
| <div class="codetabs"> |
| <div data-lang="scala"> |
| |
| <p><a href="api/scala/index.html#org.apache.spark.mllib.stat.Statistics$"><code>colStats()</code></a> returns an instance of |
| <a href="api/scala/index.html#org.apache.spark.mllib.stat.MultivariateStatisticalSummary"><code>MultivariateStatisticalSummary</code></a>, |
| which contains the column-wise max, min, mean, variance, and number of nonzeros, as well as the |
| total count.</p> |
| |
| <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="k">import</span> <span class="nn">org.apache.spark.mllib.linalg.Vector</span> |
| <span class="k">import</span> <span class="nn">org.apache.spark.mllib.stat.</span><span class="o">{</span><span class="nc">MultivariateStatisticalSummary</span><span class="o">,</span> <span class="nc">Statistics</span><span class="o">}</span> |
| |
| <span class="k">val</span> <span class="n">observations</span><span class="k">:</span> <span class="kt">RDD</span><span class="o">[</span><span class="kt">Vector</span><span class="o">]</span> <span class="k">=</span> <span class="o">...</span> <span class="c1">// an RDD of Vectors</span> |
| |
| <span class="c1">// Compute column summary statistics.</span> |
| <span class="k">val</span> <span class="n">summary</span><span class="k">:</span> <span class="kt">MultivariateStatisticalSummary</span> <span class="o">=</span> <span class="nc">Statistics</span><span class="o">.</span><span class="n">colStats</span><span class="o">(</span><span class="n">observations</span><span class="o">)</span> |
| <span class="n">println</span><span class="o">(</span><span class="n">summary</span><span class="o">.</span><span class="n">mean</span><span class="o">)</span> <span class="c1">// a dense vector containing the mean value for each column</span> |
| <span class="n">println</span><span class="o">(</span><span class="n">summary</span><span class="o">.</span><span class="n">variance</span><span class="o">)</span> <span class="c1">// column-wise variance</span> |
| <span class="n">println</span><span class="o">(</span><span class="n">summary</span><span class="o">.</span><span class="n">numNonzeros</span><span class="o">)</span> <span class="c1">// number of nonzeros in each column</span></code></pre></div> |
| |
| </div> |
| |
| <div data-lang="java"> |
| |
| <p><a href="api/java/org/apache/spark/mllib/stat/Statistics.html"><code>colStats()</code></a> returns an instance of |
| <a href="api/java/org/apache/spark/mllib/stat/MultivariateStatisticalSummary.html"><code>MultivariateStatisticalSummary</code></a>, |
| which contains the column-wise max, min, mean, variance, and number of nonzeros, as well as the |
| total count.</p> |
| |
| <div class="highlight"><pre><code class="language-java" data-lang="java"><span class="kn">import</span> <span class="nn">org.apache.spark.api.java.JavaRDD</span><span class="o">;</span> |
| <span class="kn">import</span> <span class="nn">org.apache.spark.api.java.JavaSparkContext</span><span class="o">;</span> |
| <span class="kn">import</span> <span class="nn">org.apache.spark.mllib.linalg.Vector</span><span class="o">;</span> |
| <span class="kn">import</span> <span class="nn">org.apache.spark.mllib.stat.MultivariateStatisticalSummary</span><span class="o">;</span> |
| <span class="kn">import</span> <span class="nn">org.apache.spark.mllib.stat.Statistics</span><span class="o">;</span> |
| |
| <span class="n">JavaSparkContext</span> <span class="n">jsc</span> <span class="o">=</span> <span class="o">...</span> |
| |
| <span class="n">JavaRDD</span><span class="o"><</span><span class="n">Vector</span><span class="o">></span> <span class="n">mat</span> <span class="o">=</span> <span class="o">...</span> <span class="c1">// an RDD of Vectors</span> |
| |
| <span class="c1">// Compute column summary statistics.</span> |
| <span class="n">MultivariateStatisticalSummary</span> <span class="n">summary</span> <span class="o">=</span> <span class="n">Statistics</span><span class="o">.</span><span class="na">colStats</span><span class="o">(</span><span class="n">mat</span><span class="o">.</span><span class="na">rdd</span><span class="o">());</span> |
| <span class="n">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="n">summary</span><span class="o">.</span><span class="na">mean</span><span class="o">());</span> <span class="c1">// a dense vector containing the mean value for each column</span> |
| <span class="n">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="n">summary</span><span class="o">.</span><span class="na">variance</span><span class="o">());</span> <span class="c1">// column-wise variance</span> |
| <span class="n">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="n">summary</span><span class="o">.</span><span class="na">numNonzeros</span><span class="o">());</span> <span class="c1">// number of nonzeros in each column</span></code></pre></div> |
| |
| </div> |
| |
| <div data-lang="python"> |
| <p><a href="api/python/pyspark.mllib.stat.Statistics-class.html#colStats"><code>colStats()</code></a> returns an instance of |
| <a href="api/python/pyspark.mllib.stat.MultivariateStatisticalSummary-class.html"><code>MultivariateStatisticalSummary</code></a>, |
| which contains the column-wise max, min, mean, variance, and number of nonzeros, as well as the |
| total count.</p> |
| |
| <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="kn">from</span> <span class="nn">pyspark.mllib.stat</span> <span class="kn">import</span> <span class="n">Statistics</span> |
| |
| <span class="n">sc</span> <span class="o">=</span> <span class="o">...</span> <span class="c"># SparkContext</span> |
| |
| <span class="n">mat</span> <span class="o">=</span> <span class="o">...</span> <span class="c"># an RDD of Vectors</span> |
| |
| <span class="c"># Compute column summary statistics.</span> |
| <span class="n">summary</span> <span class="o">=</span> <span class="n">Statistics</span><span class="o">.</span><span class="n">colStats</span><span class="p">(</span><span class="n">mat</span><span class="p">)</span> |
| <span class="k">print</span> <span class="n">summary</span><span class="o">.</span><span class="n">mean</span><span class="p">()</span> |
| <span class="k">print</span> <span class="n">summary</span><span class="o">.</span><span class="n">variance</span><span class="p">()</span> |
| <span class="k">print</span> <span class="n">summary</span><span class="o">.</span><span class="n">numNonzeros</span><span class="p">()</span></code></pre></div> |
| |
| </div> |
| |
| </div> |
| |
| <h2 id="correlations">Correlations</h2> |
| |
| <p>Calculating the correlation between two series of data is a common operation in Statistics. In MLlib |
| we provide the flexibility to calculate pairwise correlations among many series. The supported |
| correlation methods are currently Pearson’s and Spearman’s correlation.</p> |
| |
| <div class="codetabs"> |
| <div data-lang="scala"> |
| <p><a href="api/scala/index.html#org.apache.spark.mllib.stat.Statistics$"><code>Statistics</code></a> provides methods to |
| calculate correlations between series. Depending on the type of input, two <code>RDD[Double]</code>s or |
| an <code>RDD[Vector]</code>, the output will be a <code>Double</code> or the correlation <code>Matrix</code> respectively.</p> |
| |
| <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="k">import</span> <span class="nn">org.apache.spark.SparkContext</span> |
| <span class="k">import</span> <span class="nn">org.apache.spark.mllib.linalg._</span> |
| <span class="k">import</span> <span class="nn">org.apache.spark.mllib.stat.Statistics</span> |
| |
| <span class="k">val</span> <span class="n">sc</span><span class="k">:</span> <span class="kt">SparkContext</span> <span class="o">=</span> <span class="o">...</span> |
| |
| <span class="k">val</span> <span class="n">seriesX</span><span class="k">:</span> <span class="kt">RDD</span><span class="o">[</span><span class="kt">Double</span><span class="o">]</span> <span class="k">=</span> <span class="o">...</span> <span class="c1">// a series</span> |
| <span class="k">val</span> <span class="n">seriesY</span><span class="k">:</span> <span class="kt">RDD</span><span class="o">[</span><span class="kt">Double</span><span class="o">]</span> <span class="k">=</span> <span class="o">...</span> <span class="c1">// must have the same number of partitions and cardinality as seriesX</span> |
| |
| <span class="c1">// compute the correlation using Pearson's method. Enter "spearman" for Spearman's method. If a </span> |
| <span class="c1">// method is not specified, Pearson's method will be used by default. </span> |
| <span class="k">val</span> <span class="n">correlation</span><span class="k">:</span> <span class="kt">Double</span> <span class="o">=</span> <span class="nc">Statistics</span><span class="o">.</span><span class="n">corr</span><span class="o">(</span><span class="n">seriesX</span><span class="o">,</span> <span class="n">seriesY</span><span class="o">,</span> <span class="s">"pearson"</span><span class="o">)</span> |
| |
| <span class="k">val</span> <span class="n">data</span><span class="k">:</span> <span class="kt">RDD</span><span class="o">[</span><span class="kt">Vector</span><span class="o">]</span> <span class="k">=</span> <span class="o">...</span> <span class="c1">// note that each Vector is a row and not a column</span> |
| |
| <span class="c1">// calculate the correlation matrix using Pearson's method. Use "spearman" for Spearman's method.</span> |
| <span class="c1">// If a method is not specified, Pearson's method will be used by default. </span> |
| <span class="k">val</span> <span class="n">correlMatrix</span><span class="k">:</span> <span class="kt">Matrix</span> <span class="o">=</span> <span class="nc">Statistics</span><span class="o">.</span><span class="n">corr</span><span class="o">(</span><span class="n">data</span><span class="o">,</span> <span class="s">"pearson"</span><span class="o">)</span></code></pre></div> |
| |
| </div> |
| |
| <div data-lang="java"> |
| <p><a href="api/java/org/apache/spark/mllib/stat/Statistics.html"><code>Statistics</code></a> provides methods to |
| calculate correlations between series. Depending on the type of input, two <code>JavaDoubleRDD</code>s or |
| a <code>JavaRDD<Vector></code>, the output will be a <code>Double</code> or the correlation <code>Matrix</code> respectively.</p> |
| |
| <div class="highlight"><pre><code class="language-java" data-lang="java"><span class="kn">import</span> <span class="nn">org.apache.spark.api.java.JavaDoubleRDD</span><span class="o">;</span> |
| <span class="kn">import</span> <span class="nn">org.apache.spark.api.java.JavaSparkContext</span><span class="o">;</span> |
| <span class="kn">import</span> <span class="nn">org.apache.spark.mllib.linalg.*</span><span class="o">;</span> |
| <span class="kn">import</span> <span class="nn">org.apache.spark.mllib.stat.Statistics</span><span class="o">;</span> |
| |
| <span class="n">JavaSparkContext</span> <span class="n">jsc</span> <span class="o">=</span> <span class="o">...</span> |
| |
| <span class="n">JavaDoubleRDD</span> <span class="n">seriesX</span> <span class="o">=</span> <span class="o">...</span> <span class="c1">// a series</span> |
| <span class="n">JavaDoubleRDD</span> <span class="n">seriesY</span> <span class="o">=</span> <span class="o">...</span> <span class="c1">// must have the same number of partitions and cardinality as seriesX</span> |
| |
| <span class="c1">// compute the correlation using Pearson's method. Enter "spearman" for Spearman's method. If a </span> |
| <span class="c1">// method is not specified, Pearson's method will be used by default. </span> |
| <span class="n">Double</span> <span class="n">correlation</span> <span class="o">=</span> <span class="n">Statistics</span><span class="o">.</span><span class="na">corr</span><span class="o">(</span><span class="n">seriesX</span><span class="o">.</span><span class="na">srdd</span><span class="o">(),</span> <span class="n">seriesY</span><span class="o">.</span><span class="na">srdd</span><span class="o">(),</span> <span class="s">"pearson"</span><span class="o">);</span> |
| |
| <span class="n">JavaRDD</span><span class="o"><</span><span class="n">Vector</span><span class="o">></span> <span class="n">data</span> <span class="o">=</span> <span class="o">...</span> <span class="c1">// note that each Vector is a row and not a column</span> |
| |
| <span class="c1">// calculate the correlation matrix using Pearson's method. Use "spearman" for Spearman's method.</span> |
| <span class="c1">// If a method is not specified, Pearson's method will be used by default. </span> |
| <span class="n">Matrix</span> <span class="n">correlMatrix</span> <span class="o">=</span> <span class="n">Statistics</span><span class="o">.</span><span class="na">corr</span><span class="o">(</span><span class="n">data</span><span class="o">.</span><span class="na">rdd</span><span class="o">(),</span> <span class="s">"pearson"</span><span class="o">);</span></code></pre></div> |
| |
| </div> |
| |
| <div data-lang="python"> |
| <p><a href="api/python/pyspark.mllib.stat.Statistics-class.html"><code>Statistics</code></a> provides methods to |
| calculate correlations between series. Depending on the type of input, two <code>RDD[Double]</code>s or |
| an <code>RDD[Vector]</code>, the output will be a <code>Double</code> or the correlation <code>Matrix</code> respectively.</p> |
| |
| <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="kn">from</span> <span class="nn">pyspark.mllib.stat</span> <span class="kn">import</span> <span class="n">Statistics</span> |
| |
| <span class="n">sc</span> <span class="o">=</span> <span class="o">...</span> <span class="c"># SparkContext</span> |
| |
| <span class="n">seriesX</span> <span class="o">=</span> <span class="o">...</span> <span class="c"># a series</span> |
| <span class="n">seriesY</span> <span class="o">=</span> <span class="o">...</span> <span class="c"># must have the same number of partitions and cardinality as seriesX</span> |
| |
| <span class="c"># Compute the correlation using Pearson's method. Enter "spearman" for Spearman's method. If a </span> |
| <span class="c"># method is not specified, Pearson's method will be used by default. </span> |
| <span class="k">print</span> <span class="n">Statistics</span><span class="o">.</span><span class="n">corr</span><span class="p">(</span><span class="n">seriesX</span><span class="p">,</span> <span class="n">seriesY</span><span class="p">,</span> <span class="n">method</span><span class="o">=</span><span class="s">"pearson"</span><span class="p">)</span> |
| |
| <span class="n">data</span> <span class="o">=</span> <span class="o">...</span> <span class="c"># an RDD of Vectors</span> |
| <span class="c"># calculate the correlation matrix using Pearson's method. Use "spearman" for Spearman's method.</span> |
| <span class="c"># If a method is not specified, Pearson's method will be used by default. </span> |
| <span class="k">print</span> <span class="n">Statistics</span><span class="o">.</span><span class="n">corr</span><span class="p">(</span><span class="n">data</span><span class="p">,</span> <span class="n">method</span><span class="o">=</span><span class="s">"pearson"</span><span class="p">)</span></code></pre></div> |
| |
| </div> |
| |
| </div> |
| |
| <h2 id="stratified-sampling">Stratified sampling</h2> |
| |
<p>Unlike the other statistics functions, which reside in MLlib, the stratified sampling methods
<code>sampleByKey</code> and <code>sampleByKeyExact</code> can be performed on RDDs of key-value pairs. For stratified
sampling, the keys can be thought of as a label and the value as a specific attribute. For example,
the key can be man or woman, or document ids, and the respective values can be the list of ages
of the people in the population or the list of words in the documents. The <code>sampleByKey</code> method
will flip a coin to decide whether an observation will be sampled or not, and therefore requires one
pass over the data while providing an <em>expected</em> sample size. <code>sampleByKeyExact</code> requires significantly
more resources than the per-stratum simple random sampling used in <code>sampleByKey</code>, but will provide
the exact sample size with 99.99% confidence. <code>sampleByKeyExact</code> is currently not supported in
Python.</p>
| |
| <div class="codetabs"> |
| <div data-lang="scala"> |
| <p><a href="api/scala/index.html#org.apache.spark.rdd.PairRDDFunctions"><code>sampleByKeyExact()</code></a> allows users to |
| sample exactly $\lceil f_k \cdot n_k \rceil \, \forall k \in K$ items, where $f_k$ is the desired |
| fraction for key $k$, $n_k$ is the number of key-value pairs for key $k$, and $K$ is the set of |
| keys. Sampling without replacement requires one additional pass over the RDD to guarantee sample |
| size, whereas sampling with replacement requires two additional passes.</p> |
| |
| <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="k">import</span> <span class="nn">org.apache.spark.SparkContext</span> |
| <span class="k">import</span> <span class="nn">org.apache.spark.SparkContext._</span> |
| <span class="k">import</span> <span class="nn">org.apache.spark.rdd.PairRDDFunctions</span> |
| |
| <span class="k">val</span> <span class="n">sc</span><span class="k">:</span> <span class="kt">SparkContext</span> <span class="o">=</span> <span class="o">...</span> |
| |
| <span class="k">val</span> <span class="n">data</span> <span class="k">=</span> <span class="o">...</span> <span class="c1">// an RDD[(K, V)] of any key value pairs</span> |
| <span class="k">val</span> <span class="n">fractions</span><span class="k">:</span> <span class="kt">Map</span><span class="o">[</span><span class="kt">K</span>, <span class="kt">Double</span><span class="o">]</span> <span class="k">=</span> <span class="o">...</span> <span class="c1">// specify the exact fraction desired from each key</span> |
| |
| <span class="c1">// Get an exact sample from each stratum</span> |
| <span class="k">val</span> <span class="n">approxSample</span> <span class="k">=</span> <span class="n">data</span><span class="o">.</span><span class="n">sampleByKey</span><span class="o">(</span><span class="n">withReplacement</span> <span class="k">=</span> <span class="kc">false</span><span class="o">,</span> <span class="n">fractions</span><span class="o">)</span> |
| <span class="k">val</span> <span class="n">exactSample</span> <span class="k">=</span> <span class="n">data</span><span class="o">.</span><span class="n">sampleByKeyExact</span><span class="o">(</span><span class="n">withReplacement</span> <span class="k">=</span> <span class="kc">false</span><span class="o">,</span> <span class="n">fractions</span><span class="o">)</span></code></pre></div> |
| |
| </div> |
| |
| <div data-lang="java"> |
| <p><a href="api/java/org/apache/spark/api/java/JavaPairRDD.html"><code>sampleByKeyExact()</code></a> allows users to |
| sample exactly $\lceil f_k \cdot n_k \rceil \, \forall k \in K$ items, where $f_k$ is the desired |
| fraction for key $k$, $n_k$ is the number of key-value pairs for key $k$, and $K$ is the set of |
| keys. Sampling without replacement requires one additional pass over the RDD to guarantee sample |
| size, whereas sampling with replacement requires two additional passes.</p> |
| |
| <div class="highlight"><pre><code class="language-java" data-lang="java"><span class="kn">import</span> <span class="nn">java.util.Map</span><span class="o">;</span> |
| |
| <span class="kn">import</span> <span class="nn">org.apache.spark.api.java.JavaPairRDD</span><span class="o">;</span> |
| <span class="kn">import</span> <span class="nn">org.apache.spark.api.java.JavaSparkContext</span><span class="o">;</span> |
| |
| <span class="n">JavaSparkContext</span> <span class="n">jsc</span> <span class="o">=</span> <span class="o">...</span> |
| |
| <span class="n">JavaPairRDD</span><span class="o"><</span><span class="n">K</span><span class="o">,</span> <span class="n">V</span><span class="o">></span> <span class="n">data</span> <span class="o">=</span> <span class="o">...</span> <span class="c1">// an RDD of any key value pairs</span> |
| <span class="n">Map</span><span class="o"><</span><span class="n">K</span><span class="o">,</span> <span class="n">Object</span><span class="o">></span> <span class="n">fractions</span> <span class="o">=</span> <span class="o">...</span> <span class="c1">// specify the exact fraction desired from each key</span> |
| |
| <span class="c1">// Get an exact sample from each stratum</span> |
| <span class="n">JavaPairRDD</span><span class="o"><</span><span class="n">K</span><span class="o">,</span> <span class="n">V</span><span class="o">></span> <span class="n">approxSample</span> <span class="o">=</span> <span class="n">data</span><span class="o">.</span><span class="na">sampleByKey</span><span class="o">(</span><span class="kc">false</span><span class="o">,</span> <span class="n">fractions</span><span class="o">);</span> |
| <span class="n">JavaPairRDD</span><span class="o"><</span><span class="n">K</span><span class="o">,</span> <span class="n">V</span><span class="o">></span> <span class="n">exactSample</span> <span class="o">=</span> <span class="n">data</span><span class="o">.</span><span class="na">sampleByKeyExact</span><span class="o">(</span><span class="kc">false</span><span class="o">,</span> <span class="n">fractions</span><span class="o">);</span></code></pre></div> |
| |
| </div> |
| <div data-lang="python"> |
| <p><a href="api/python/pyspark.rdd.RDD-class.html#sampleByKey"><code>sampleByKey()</code></a> allows users to |
| sample approximately $\lceil f_k \cdot n_k \rceil \, \forall k \in K$ items, where $f_k$ is the |
| desired fraction for key $k$, $n_k$ is the number of key-value pairs for key $k$, and $K$ is the |
| set of keys.</p> |
| |
| <p><em>Note:</em> <code>sampleByKeyExact()</code> is currently not supported in Python.</p> |
| |
| <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="n">sc</span> <span class="o">=</span> <span class="o">...</span> <span class="c"># SparkContext</span> |
| |
| <span class="n">data</span> <span class="o">=</span> <span class="o">...</span> <span class="c"># an RDD of any key value pairs</span> |
| <span class="n">fractions</span> <span class="o">=</span> <span class="o">...</span> <span class="c"># specify the exact fraction desired from each key as a dictionary</span> |
| |
| <span class="n">approxSample</span> <span class="o">=</span> <span class="n">data</span><span class="o">.</span><span class="n">sampleByKey</span><span class="p">(</span><span class="bp">False</span><span class="p">,</span> <span class="n">fractions</span><span class="p">);</span></code></pre></div> |
| |
| </div> |
| |
| </div> |
| |
| <h2 id="hypothesis-testing">Hypothesis testing</h2> |
| |
<p>Hypothesis testing is a powerful tool in statistics to determine whether a result is statistically
significant — that is, whether it occurred by chance or not. MLlib currently supports Pearson’s
chi-squared ($\chi^2$) tests for goodness of fit and independence. The input data types determine
whether the goodness of fit test or the independence test is conducted. The goodness of fit test requires
an input type of <code>Vector</code>, whereas the independence test requires a <code>Matrix</code> as input.</p>
| |
| <p>MLlib also supports the input type <code>RDD[LabeledPoint]</code> to enable feature selection via chi-squared |
| independence tests.</p> |
| |
| <div class="codetabs"> |
| <div data-lang="scala"> |
| <p><a href="api/scala/index.html#org.apache.spark.mllib.stat.Statistics$"><code>Statistics</code></a> provides methods to |
| run Pearson’s chi-squared tests. The following example demonstrates how to run and interpret |
| hypothesis tests.</p> |
| |
| <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="k">import</span> <span class="nn">org.apache.spark.SparkContext</span> |
| <span class="k">import</span> <span class="nn">org.apache.spark.mllib.linalg._</span> |
| <span class="k">import</span> <span class="nn">org.apache.spark.mllib.regression.LabeledPoint</span> |
| <span class="k">import</span> <span class="nn">org.apache.spark.mllib.stat.Statistics._</span> |
| |
| <span class="k">val</span> <span class="n">sc</span><span class="k">:</span> <span class="kt">SparkContext</span> <span class="o">=</span> <span class="o">...</span> |
| |
| <span class="k">val</span> <span class="n">vec</span><span class="k">:</span> <span class="kt">Vector</span> <span class="o">=</span> <span class="o">...</span> <span class="c1">// a vector composed of the frequencies of events</span> |
| |
| <span class="c1">// compute the goodness of fit. If a second vector to test against is not supplied as a parameter, </span> |
| <span class="c1">// the test runs against a uniform distribution. </span> |
| <span class="k">val</span> <span class="n">goodnessOfFitTestResult</span> <span class="k">=</span> <span class="nc">Statistics</span><span class="o">.</span><span class="n">chiSqTest</span><span class="o">(</span><span class="n">vec</span><span class="o">)</span> |
| <span class="n">println</span><span class="o">(</span><span class="n">goodnessOfFitTestResult</span><span class="o">)</span> <span class="c1">// summary of the test including the p-value, degrees of freedom, </span> |
| <span class="c1">// test statistic, the method used, and the null hypothesis.</span> |
| |
| <span class="k">val</span> <span class="n">mat</span><span class="k">:</span> <span class="kt">Matrix</span> <span class="o">=</span> <span class="o">...</span> <span class="c1">// a contingency matrix</span> |
| |
| <span class="c1">// conduct Pearson's independence test on the input contingency matrix</span> |
| <span class="k">val</span> <span class="n">independenceTestResult</span> <span class="k">=</span> <span class="nc">Statistics</span><span class="o">.</span><span class="n">chiSqTest</span><span class="o">(</span><span class="n">mat</span><span class="o">)</span> |
| <span class="n">println</span><span class="o">(</span><span class="n">independenceTestResult</span><span class="o">)</span> <span class="c1">// summary of the test including the p-value, degrees of freedom...</span> |
| |
| <span class="k">val</span> <span class="n">obs</span><span class="k">:</span> <span class="kt">RDD</span><span class="o">[</span><span class="kt">LabeledPoint</span><span class="o">]</span> <span class="k">=</span> <span class="o">...</span> <span class="c1">// (feature, label) pairs.</span> |
| |
| <span class="c1">// The contingency table is constructed from the raw (feature, label) pairs and used to conduct</span> |
| <span class="c1">// the independence test. Returns an array containing the ChiSquaredTestResult for every feature </span> |
| <span class="c1">// against the label.</span> |
| <span class="k">val</span> <span class="n">featureTestResults</span><span class="k">:</span> <span class="kt">Array</span><span class="o">[</span><span class="kt">ChiSqTestResult</span><span class="o">]</span> <span class="k">=</span> <span class="nc">Statistics</span><span class="o">.</span><span class="n">chiSqTest</span><span class="o">(</span><span class="n">obs</span><span class="o">)</span> |
| <span class="k">var</span> <span class="n">i</span> <span class="k">=</span> <span class="mi">1</span> |
| <span class="n">featureTestResults</span><span class="o">.</span><span class="n">foreach</span> <span class="o">{</span> <span class="n">result</span> <span class="k">=></span> |
| <span class="n">println</span><span class="o">(</span><span class="n">s</span><span class="s">"Column $i:\n$result"</span><span class="o">)</span> |
| <span class="n">i</span> <span class="o">+=</span> <span class="mi">1</span> |
| <span class="o">}</span> <span class="c1">// summary of the test</span></code></pre></div> |
| |
| </div> |
| |
| <div data-lang="java"> |
| <p><a href="api/java/org/apache/spark/mllib/stat/Statistics.html"><code>Statistics</code></a> provides methods to |
| run Pearson’s chi-squared tests. The following example demonstrates how to run and interpret |
| hypothesis tests.</p> |
| |
| <div class="highlight"><pre><code class="language-java" data-lang="java"><span class="kn">import</span> <span class="nn">org.apache.spark.api.java.JavaRDD</span><span class="o">;</span> |
| <span class="kn">import</span> <span class="nn">org.apache.spark.api.java.JavaSparkContext</span><span class="o">;</span> |
| <span class="kn">import</span> <span class="nn">org.apache.spark.mllib.linalg.*</span><span class="o">;</span> |
| <span class="kn">import</span> <span class="nn">org.apache.spark.mllib.regression.LabeledPoint</span><span class="o">;</span> |
| <span class="kn">import</span> <span class="nn">org.apache.spark.mllib.stat.Statistics</span><span class="o">;</span> |
| <span class="kn">import</span> <span class="nn">org.apache.spark.mllib.stat.test.ChiSqTestResult</span><span class="o">;</span> |
| |
| <span class="n">JavaSparkContext</span> <span class="n">jsc</span> <span class="o">=</span> <span class="o">...</span> |
| |
| <span class="n">Vector</span> <span class="n">vec</span> <span class="o">=</span> <span class="o">...</span> <span class="c1">// a vector composed of the frequencies of events</span> |
| |
| <span class="c1">// compute the goodness of fit. If a second vector to test against is not supplied as a parameter, </span> |
| <span class="c1">// the test runs against a uniform distribution. </span> |
| <span class="n">ChiSqTestResult</span> <span class="n">goodnessOfFitTestResult</span> <span class="o">=</span> <span class="n">Statistics</span><span class="o">.</span><span class="na">chiSqTest</span><span class="o">(</span><span class="n">vec</span><span class="o">);</span> |
| <span class="c1">// summary of the test including the p-value, degrees of freedom, test statistic, the method used, </span> |
| <span class="c1">// and the null hypothesis.</span> |
| <span class="n">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="n">goodnessOfFitTestResult</span><span class="o">);</span> |
| |
| <span class="n">Matrix</span> <span class="n">mat</span> <span class="o">=</span> <span class="o">...</span> <span class="c1">// a contingency matrix</span> |
| |
| <span class="c1">// conduct Pearson's independence test on the input contingency matrix</span> |
| <span class="n">ChiSqTestResult</span> <span class="n">independenceTestResult</span> <span class="o">=</span> <span class="n">Statistics</span><span class="o">.</span><span class="na">chiSqTest</span><span class="o">(</span><span class="n">mat</span><span class="o">);</span> |
| <span class="c1">// summary of the test including the p-value, degrees of freedom...</span> |
| <span class="n">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="n">independenceTestResult</span><span class="o">);</span> |
| |
| <span class="n">JavaRDD</span><span class="o"><</span><span class="n">LabeledPoint</span><span class="o">></span> <span class="n">obs</span> <span class="o">=</span> <span class="o">...</span> <span class="c1">// an RDD of labeled points</span> |
| |
| <span class="c1">// The contingency table is constructed from the raw (feature, label) pairs and used to conduct</span> |
| <span class="c1">// the independence test. Returns an array containing the ChiSquaredTestResult for every feature </span> |
| <span class="c1">// against the label.</span> |
| <span class="n">ChiSqTestResult</span><span class="o">[]</span> <span class="n">featureTestResults</span> <span class="o">=</span> <span class="n">Statistics</span><span class="o">.</span><span class="na">chiSqTest</span><span class="o">(</span><span class="n">obs</span><span class="o">.</span><span class="na">rdd</span><span class="o">());</span> |
| <span class="kt">int</span> <span class="n">i</span> <span class="o">=</span> <span class="mi">1</span><span class="o">;</span> |
| <span class="k">for</span> <span class="o">(</span><span class="n">ChiSqTestResult</span> <span class="n">result</span> <span class="o">:</span> <span class="n">featureTestResults</span><span class="o">)</span> <span class="o">{</span> |
| <span class="n">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="s">"Column "</span> <span class="o">+</span> <span class="n">i</span> <span class="o">+</span> <span class="s">":"</span><span class="o">);</span> |
| <span class="n">System</span><span class="o">.</span><span class="na">out</span><span class="o">.</span><span class="na">println</span><span class="o">(</span><span class="n">result</span><span class="o">);</span> <span class="c1">// summary of the test</span> |
| <span class="n">i</span><span class="o">++;</span> |
| <span class="o">}</span></code></pre></div> |
| |
| </div> |
| |
| <div data-lang="python"> |
<p><a href="api/python/index.html#pyspark.mllib.stat.Statistics"><code>Statistics</code></a> provides methods to
| run Pearson’s chi-squared tests. The following example demonstrates how to run and interpret |
| hypothesis tests.</p> |
| |
| <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="kn">from</span> <span class="nn">pyspark</span> <span class="kn">import</span> <span class="n">SparkContext</span> |
| <span class="kn">from</span> <span class="nn">pyspark.mllib.linalg</span> <span class="kn">import</span> <span class="n">Vectors</span><span class="p">,</span> <span class="n">Matrices</span> |
<span class="kn">from</span> <span class="nn">pyspark.mllib.regression</span> <span class="kn">import</span> <span class="n">LabeledPoint</span>
| <span class="kn">from</span> <span class="nn">pyspark.mllib.stat</span> <span class="kn">import</span> <span class="n">Statistics</span> |
| |
| <span class="n">sc</span> <span class="o">=</span> <span class="n">SparkContext</span><span class="p">()</span> |
| |
| <span class="n">vec</span> <span class="o">=</span> <span class="n">Vectors</span><span class="o">.</span><span class="n">dense</span><span class="p">(</span><span class="o">...</span><span class="p">)</span> <span class="c"># a vector composed of the frequencies of events</span> |
| |
| <span class="c"># compute the goodness of fit. If a second vector to test against is not supplied as a parameter,</span> |
| <span class="c"># the test runs against a uniform distribution.</span> |
| <span class="n">goodnessOfFitTestResult</span> <span class="o">=</span> <span class="n">Statistics</span><span class="o">.</span><span class="n">chiSqTest</span><span class="p">(</span><span class="n">vec</span><span class="p">)</span> |
| <span class="k">print</span> <span class="n">goodnessOfFitTestResult</span> <span class="c"># summary of the test including the p-value, degrees of freedom,</span> |
| <span class="c"># test statistic, the method used, and the null hypothesis.</span> |
| |
| <span class="n">mat</span> <span class="o">=</span> <span class="n">Matrices</span><span class="o">.</span><span class="n">dense</span><span class="p">(</span><span class="o">...</span><span class="p">)</span> <span class="c"># a contingency matrix</span> |
| |
| <span class="c"># conduct Pearson's independence test on the input contingency matrix</span> |
| <span class="n">independenceTestResult</span> <span class="o">=</span> <span class="n">Statistics</span><span class="o">.</span><span class="n">chiSqTest</span><span class="p">(</span><span class="n">mat</span><span class="p">)</span> |
| <span class="k">print</span> <span class="n">independenceTestResult</span> <span class="c"># summary of the test including the p-value, degrees of freedom...</span> |
| |
<span class="n">obs</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">parallelize</span><span class="p">(</span><span class="o">...</span><span class="p">)</span> <span class="c"># an RDD of LabeledPoint(feature, label) pairs.</span>
| |
| <span class="c"># The contingency table is constructed from an RDD of LabeledPoint and used to conduct</span> |
| <span class="c"># the independence test. Returns an array containing the ChiSquaredTestResult for every feature</span> |
| <span class="c"># against the label.</span> |
| <span class="n">featureTestResults</span> <span class="o">=</span> <span class="n">Statistics</span><span class="o">.</span><span class="n">chiSqTest</span><span class="p">(</span><span class="n">obs</span><span class="p">)</span> |
| |
| <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">result</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">featureTestResults</span><span class="p">):</span> |
<span class="k">print</span> <span class="s">"Column %d:"</span> <span class="o">%</span> <span class="p">(</span><span class="n">i</span> <span class="o">+</span> <span class="mi">1</span><span class="p">)</span>
| <span class="k">print</span> <span class="n">result</span></code></pre></div> |
| |
| </div> |
| |
| </div> |
| |
| <h2 id="random-data-generation">Random data generation</h2> |
| |
| <p>Random data generation is useful for randomized algorithms, prototyping, and performance testing. |
| MLlib supports generating random RDDs with i.i.d. values drawn from a given distribution: |
| uniform, standard normal, or Poisson.</p> |
| |
| <div class="codetabs"> |
| <div data-lang="scala"> |
| <p><a href="api/scala/index.html#org.apache.spark.mllib.random.RandomRDDs"><code>RandomRDDs</code></a> provides factory |
| methods to generate random double RDDs or vector RDDs. |
The following example generates a random double RDD, whose values follow the standard normal
distribution <code>N(0, 1)</code>, and then maps it to <code>N(1, 4)</code>.</p>
| |
| <div class="highlight"><pre><code class="language-scala" data-lang="scala"><span class="k">import</span> <span class="nn">org.apache.spark.SparkContext</span> |
| <span class="k">import</span> <span class="nn">org.apache.spark.mllib.random.RandomRDDs._</span> |
| |
| <span class="k">val</span> <span class="n">sc</span><span class="k">:</span> <span class="kt">SparkContext</span> <span class="o">=</span> <span class="o">...</span> |
| |
| <span class="c1">// Generate a random double RDD that contains 1 million i.i.d. values drawn from the</span> |
| <span class="c1">// standard normal distribution `N(0, 1)`, evenly distributed in 10 partitions.</span> |
| <span class="k">val</span> <span class="n">u</span> <span class="k">=</span> <span class="n">normalRDD</span><span class="o">(</span><span class="n">sc</span><span class="o">,</span> <span class="mi">1000000L</span><span class="o">,</span> <span class="mi">10</span><span class="o">)</span> |
| <span class="c1">// Apply a transform to get a random double RDD following `N(1, 4)`.</span> |
| <span class="k">val</span> <span class="n">v</span> <span class="k">=</span> <span class="n">u</span><span class="o">.</span><span class="n">map</span><span class="o">(</span><span class="n">x</span> <span class="k">=></span> <span class="mf">1.0</span> <span class="o">+</span> <span class="mf">2.0</span> <span class="o">*</span> <span class="n">x</span><span class="o">)</span></code></pre></div> |
| |
| </div> |
| |
| <div data-lang="java"> |
<p><a href="api/java/org/apache/spark/mllib/random/RandomRDDs.html"><code>RandomRDDs</code></a> provides factory
| methods to generate random double RDDs or vector RDDs. |
The following example generates a random double RDD, whose values follow the standard normal
distribution <code>N(0, 1)</code>, and then maps it to <code>N(1, 4)</code>.</p>
| |
| <div class="highlight"><pre><code class="language-java" data-lang="java"><span class="kn">import</span> <span class="nn">org.apache.spark.SparkContext</span><span class="o">;</span> |
<span class="kn">import</span> <span class="nn">org.apache.spark.api.java.JavaDoubleRDD</span><span class="o">;</span>
| <span class="kn">import</span> <span class="nn">static</span> <span class="n">org</span><span class="o">.</span><span class="na">apache</span><span class="o">.</span><span class="na">spark</span><span class="o">.</span><span class="na">mllib</span><span class="o">.</span><span class="na">random</span><span class="o">.</span><span class="na">RandomRDDs</span><span class="o">.*;</span> |
| |
| <span class="n">JavaSparkContext</span> <span class="n">jsc</span> <span class="o">=</span> <span class="o">...</span> |
| |
| <span class="c1">// Generate a random double RDD that contains 1 million i.i.d. values drawn from the</span> |
| <span class="c1">// standard normal distribution `N(0, 1)`, evenly distributed in 10 partitions.</span> |
| <span class="n">JavaDoubleRDD</span> <span class="n">u</span> <span class="o">=</span> <span class="n">normalJavaRDD</span><span class="o">(</span><span class="n">jsc</span><span class="o">,</span> <span class="mi">1000000L</span><span class="o">,</span> <span class="mi">10</span><span class="o">);</span> |
| <span class="c1">// Apply a transform to get a random double RDD following `N(1, 4)`.</span> |
| <span class="n">JavaDoubleRDD</span> <span class="n">v</span> <span class="o">=</span> <span class="n">u</span><span class="o">.</span><span class="na">map</span><span class="o">(</span> |
| <span class="k">new</span> <span class="n">Function</span><span class="o"><</span><span class="n">Double</span><span class="o">,</span> <span class="n">Double</span><span class="o">>()</span> <span class="o">{</span> |
| <span class="kd">public</span> <span class="n">Double</span> <span class="nf">call</span><span class="o">(</span><span class="n">Double</span> <span class="n">x</span><span class="o">)</span> <span class="o">{</span> |
| <span class="k">return</span> <span class="mf">1.0</span> <span class="o">+</span> <span class="mf">2.0</span> <span class="o">*</span> <span class="n">x</span><span class="o">;</span> |
| <span class="o">}</span> |
| <span class="o">});</span></code></pre></div> |
| |
| </div> |
| |
| <div data-lang="python"> |
| <p><a href="api/python/pyspark.mllib.random.RandomRDDs-class.html"><code>RandomRDDs</code></a> provides factory |
| methods to generate random double RDDs or vector RDDs. |
The following example generates a random double RDD, whose values follow the standard normal
distribution <code>N(0, 1)</code>, and then maps it to <code>N(1, 4)</code>.</p>
| |
| <div class="highlight"><pre><code class="language-python" data-lang="python"><span class="kn">from</span> <span class="nn">pyspark.mllib.random</span> <span class="kn">import</span> <span class="n">RandomRDDs</span> |
| |
| <span class="n">sc</span> <span class="o">=</span> <span class="o">...</span> <span class="c"># SparkContext</span> |
| |
| <span class="c"># Generate a random double RDD that contains 1 million i.i.d. values drawn from the</span> |
| <span class="c"># standard normal distribution `N(0, 1)`, evenly distributed in 10 partitions.</span> |
<span class="n">u</span> <span class="o">=</span> <span class="n">RandomRDDs</span><span class="o">.</span><span class="n">normalRDD</span><span class="p">(</span><span class="n">sc</span><span class="p">,</span> <span class="il">1000000L</span><span class="p">,</span> <span class="mi">10</span><span class="p">)</span>
| <span class="c"># Apply a transform to get a random double RDD following `N(1, 4)`.</span> |
| <span class="n">v</span> <span class="o">=</span> <span class="n">u</span><span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="mf">1.0</span> <span class="o">+</span> <span class="mf">2.0</span> <span class="o">*</span> <span class="n">x</span><span class="p">)</span></code></pre></div> |
| |
| </div> |
| |
| </div> |
| |
| |
| </div> <!-- /container --> |
| |
| <script src="js/vendor/jquery-1.8.0.min.js"></script> |
| <script src="js/vendor/bootstrap.min.js"></script> |
| <script src="js/main.js"></script> |
| |
| <!-- MathJax Section --> |
| <script type="text/x-mathjax-config"> |
| MathJax.Hub.Config({ |
| TeX: { equationNumbers: { autoNumber: "AMS" } } |
| }); |
| </script> |
| <script> |
| // Note that we load MathJax this way to work with local file (file://), HTTP and HTTPS. |
| // We could use "//cdn.mathjax...", but that won't support "file://". |
| (function(d, script) { |
| script = d.createElement('script'); |
| script.type = 'text/javascript'; |
| script.async = true; |
| script.onload = function(){ |
| MathJax.Hub.Config({ |
| tex2jax: { |
| inlineMath: [ ["$", "$"], ["\\\\(","\\\\)"] ], |
| displayMath: [ ["$$","$$"], ["\\[", "\\]"] ], |
| processEscapes: true, |
| skipTags: ['script', 'noscript', 'style', 'textarea', 'pre'] |
| } |
| }); |
| }; |
| script.src = ('https:' == document.location.protocol ? 'https://' : 'http://') + |
| 'cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML'; |
| d.getElementsByTagName('head')[0].appendChild(script); |
| }(document)); |
| </script> |
| </body> |
| </html> |