<!DOCTYPE html>
<!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7"> <![endif]-->
<!--[if IE 7]> <html class="no-js lt-ie9 lt-ie8"> <![endif]-->
<!--[if IE 8]> <html class="no-js lt-ie9"> <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js"> <!--<![endif]-->
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title>JSON Files - Spark 3.0.0-preview2 Documentation</title>
<link rel="stylesheet" href="css/bootstrap.min.css">
<style>
body {
padding-top: 60px;
padding-bottom: 40px;
}
</style>
<meta name="viewport" content="width=device-width">
<link rel="stylesheet" href="css/bootstrap-responsive.min.css">
<link rel="stylesheet" href="css/main.css">
<script src="js/vendor/modernizr-2.6.1-respond-1.1.0.min.js"></script>
<link rel="stylesheet" href="css/pygments-default.css">
<!-- Google analytics script -->
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-32518208-2']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
</head>
<body>
<!--[if lt IE 7]>
<p class="chromeframe">You are using an outdated browser. <a href="https://browsehappy.com/">Upgrade your browser today</a> or <a href="http://www.google.com/chromeframe/?redirect=true">install Google Chrome Frame</a> to better experience this site.</p>
<![endif]-->
<!-- This code is taken from http://twitter.github.com/bootstrap/examples/hero.html -->
<div class="navbar navbar-fixed-top" id="topbar">
<div class="navbar-inner">
<div class="container">
<div class="brand"><a href="index.html">
<img src="img/spark-logo-hd.png" style="height:50px;"/></a><span class="version">3.0.0-preview2</span>
</div>
<ul class="nav">
<!--TODO(andyk): Add class="active" attribute to li somehow.-->
<li><a href="index.html">Overview</a></li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown">Programming Guides<b class="caret"></b></a>
<ul class="dropdown-menu">
<li><a href="quick-start.html">Quick Start</a></li>
<li><a href="rdd-programming-guide.html">RDDs, Accumulators, Broadcasts Vars</a></li>
<li><a href="sql-programming-guide.html">SQL, DataFrames, and Datasets</a></li>
<li><a href="structured-streaming-programming-guide.html">Structured Streaming</a></li>
<li><a href="streaming-programming-guide.html">Spark Streaming (DStreams)</a></li>
<li><a href="ml-guide.html">MLlib (Machine Learning)</a></li>
<li><a href="graphx-programming-guide.html">GraphX (Graph Processing)</a></li>
<li><a href="sparkr.html">SparkR (R on Spark)</a></li>
</ul>
</li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown">API Docs<b class="caret"></b></a>
<ul class="dropdown-menu">
<li><a href="api/scala/index.html#org.apache.spark.package">Scala</a></li>
<li><a href="api/java/index.html">Java</a></li>
<li><a href="api/python/index.html">Python</a></li>
<li><a href="api/R/index.html">R</a></li>
<li><a href="api/sql/index.html">SQL, Built-in Functions</a></li>
</ul>
</li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown">Deploying<b class="caret"></b></a>
<ul class="dropdown-menu">
<li><a href="cluster-overview.html">Overview</a></li>
<li><a href="submitting-applications.html">Submitting Applications</a></li>
<li class="divider"></li>
<li><a href="spark-standalone.html">Spark Standalone</a></li>
<li><a href="running-on-mesos.html">Mesos</a></li>
<li><a href="running-on-yarn.html">YARN</a></li>
<li><a href="running-on-kubernetes.html">Kubernetes</a></li>
</ul>
</li>
<li class="dropdown">
<a href="api.html" class="dropdown-toggle" data-toggle="dropdown">More<b class="caret"></b></a>
<ul class="dropdown-menu">
<li><a href="configuration.html">Configuration</a></li>
<li><a href="monitoring.html">Monitoring</a></li>
<li><a href="tuning.html">Tuning Guide</a></li>
<li><a href="job-scheduling.html">Job Scheduling</a></li>
<li><a href="security.html">Security</a></li>
<li><a href="hardware-provisioning.html">Hardware Provisioning</a></li>
<li><a href="migration-guide.html">Migration Guide</a></li>
<li class="divider"></li>
<li><a href="building-spark.html">Building Spark</a></li>
<li><a href="https://spark.apache.org/contributing.html">Contributing to Spark</a></li>
<li><a href="https://spark.apache.org/third-party-projects.html">Third Party Projects</a></li>
</ul>
</li>
</ul>
<!--<p class="navbar-text pull-right"><span class="version-text">v3.0.0-preview2</span></p>-->
</div>
</div>
</div>
<div class="container-wrapper">
<div class="left-menu-wrapper">
<div class="left-menu">
<h3><a href="sql-programming-guide.html">Spark SQL Guide</a></h3>
<ul>
<li>
<a href="sql-getting-started.html">
Getting Started
</a>
</li>
<li>
<a href="sql-data-sources.html">
Data Sources
</a>
</li>
<ul>
<li>
<a href="sql-data-sources-load-save-functions.html">
Generic Load/Save Functions
</a>
</li>
<li>
<a href="sql-data-sources-parquet.html">
Parquet Files
</a>
</li>
<li>
<a href="sql-data-sources-orc.html">
ORC Files
</a>
</li>
<li>
<a href="sql-data-sources-json.html">
<b>JSON Files</b>
</a>
</li>
<li>
<a href="sql-data-sources-hive-tables.html">
Hive Tables
</a>
</li>
<li>
<a href="sql-data-sources-jdbc.html">
JDBC To Other Databases
</a>
</li>
<li>
<a href="sql-data-sources-avro.html">
Avro Files
</a>
</li>
<li>
<a href="sql-data-sources-troubleshooting.html">
Troubleshooting
</a>
</li>
</ul>
<li>
<a href="sql-performance-tuning.html">
Performance Tuning
</a>
</li>
<li>
<a href="sql-distributed-sql-engine.html">
Distributed SQL Engine
</a>
</li>
<li>
<a href="sql-pyspark-pandas-with-arrow.html">
PySpark Usage Guide for Pandas with Apache Arrow
</a>
</li>
<li>
<a href="sql-migration-old.html">
Migration Guide
</a>
</li>
<li>
<a href="sql-ref.html">
SQL Reference
</a>
</li>
</ul>
</div>
</div>
<input id="nav-trigger" class="nav-trigger" checked type="checkbox">
<label for="nav-trigger"></label>
<div class="content-with-sidebar" id="content">
<h1 class="title">JSON Files</h1>
<div class="codetabs">
<div data-lang="scala">
<p>Spark SQL can automatically infer the schema of a JSON dataset and load it as a <code class="highlighter-rouge">Dataset[Row]</code>.
This conversion can be done using <code class="highlighter-rouge">SparkSession.read.json()</code> on either a <code class="highlighter-rouge">Dataset[String]</code>
or a JSON file.</p>
<p>Note that the file that is offered as <em>a json file</em> is not a typical JSON file. Each
line must contain a separate, self-contained valid JSON object. For more information, please see
<a href="http://jsonlines.org/">JSON Lines text format, also called newline-delimited JSON</a>.</p>
<p>For a regular multi-line JSON file, set the <code class="highlighter-rouge">multiLine</code> option to <code class="highlighter-rouge">true</code>.</p>
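<p>A minimal sketch of this, assuming a hypothetical multi-line file at <code class="highlighter-rouge">examples/src/main/resources/people_multiline.json</code> (the path is illustrative; <code class="highlighter-rouge">option()</code> is the standard <code class="highlighter-rouge">DataFrameReader</code> API):</p>
<figure class="highlight"><pre><code class="language-scala" data-lang="scala">// Hypothetical file containing a single JSON document spread over multiple lines
val multiLinePath = "examples/src/main/resources/people_multiline.json"

// With multiLine enabled, Spark parses each input file as one JSON document
// instead of one document per line
val multiLineDF = spark.read.option("multiLine", true).json(multiLinePath)
multiLineDF.printSchema()</code></pre></figure>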
<p><span class="c1">// Primitive types (Int, String, etc) and Product types (case classes) encoders are</span>
<span class="c1">// supported by importing this when creating a Dataset.</span>
<span class="k">import</span> <span class="nn">spark.implicits._</span></p>
<p><span class="c1">// A JSON dataset is pointed to by path.</span>
<span class="c1">// The path can be either a single text file or a directory storing text files</span>
<span class="k">val</span> <span class="nv">path</span> <span class="k">=</span> <span class="s">&#8220;examples/src/main/resources/people.json&#8221;</span>
<span class="k">val</span> <span class="nv">peopleDF</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">read</span><span class="o">.</span><span class="py">json</span><span class="o">(</span><span class="n">path</span><span class="o">)</span></p>
<p><span class="c1">// The inferred schema can be visualized using the printSchema() method</span>
<span class="nv">peopleDF</span><span class="o">.</span><span class="py">printSchema</span><span class="o">()</span>
<span class="c1">// root</span>
<span class="c1">// |&#8211; age: long (nullable = true)</span>
<span class="c1">// |&#8211; name: string (nullable = true)</span></p>
<p><span class="c1">// Creates a temporary view using the DataFrame</span>
<span class="nv">peopleDF</span><span class="o">.</span><span class="py">createOrReplaceTempView</span><span class="o">(</span><span class="s">&#8220;people&#8221;</span><span class="o">)</span></p>
<p><span class="c1">// SQL statements can be run by using the sql methods provided by spark</span>
<span class="k">val</span> <span class="nv">teenagerNamesDF</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">sql</span><span class="o">(</span><span class="s">&#8220;SELECT name FROM people WHERE age BETWEEN 13 AND 19&#8221;</span><span class="o">)</span>
<span class="nv">teenagerNamesDF</span><span class="o">.</span><span class="py">show</span><span class="o">()</span>
<span class="c1">// +&#8212;&#8212;+</span>
<span class="c1">// | name|</span>
<span class="c1">// +&#8212;&#8212;+</span>
<span class="c1">// |Justin|</span>
<span class="c1">// +&#8212;&#8212;+</span></p>
<p><span class="c1">// Alternatively, a DataFrame can be created for a JSON dataset represented by</span>
<span class="c1">// a Dataset[String] storing one JSON object per string</span>
<span class="k">val</span> <span class="nv">otherPeopleDataset</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">createDataset</span><span class="o">(</span>
<span class="s">&#8221;&#8221;&#8221;{&#8220;name&#8221;:&#8221;Yin&#8221;,&#8221;address&#8221;:{&#8220;city&#8221;:&#8221;Columbus&#8221;,&#8221;state&#8221;:&#8221;Ohio&#8221;}}&#8221;&#8221;&#8221;</span> <span class="o">::</span> <span class="nc">Nil</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">otherPeople</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">read</span><span class="o">.</span><span class="py">json</span><span class="o">(</span><span class="n">otherPeopleDataset</span><span class="o">)</span>
<span class="nv">otherPeople</span><span class="o">.</span><span class="py">show</span><span class="o">()</span>
<span class="c1">// +&#8212;&#8212;&#8212;&#8212;&#8212;+&#8212;-+</span>
<span class="c1">// | address|name|</span>
<span class="c1">// +&#8212;&#8212;&#8212;&#8212;&#8212;+&#8212;-+</span>
<span class="c1">// |[Columbus,Ohio]| Yin|</span>
<span class="c1">// +&#8212;&#8212;&#8212;&#8212;&#8212;+&#8212;-+</span></p>
<div><small>Find full example code at "examples/src/main/scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala" in the Spark repo.</small></div>
</div>
<div data-lang="java">
<p>Spark SQL can automatically infer the schema of a JSON dataset and load it as a <code class="highlighter-rouge">Dataset&lt;Row&gt;</code>.
This conversion can be done using <code class="highlighter-rouge">SparkSession.read().json()</code> on either a <code class="highlighter-rouge">Dataset&lt;String&gt;</code>
or a JSON file.</p>
<p>Note that the file that is offered as <em>a json file</em> is not a typical JSON file. Each
line must contain a separate, self-contained valid JSON object. For more information, please see
<a href="http://jsonlines.org/">JSON Lines text format, also called newline-delimited JSON</a>.</p>
<p>For a regular multi-line JSON file, set the <code class="highlighter-rouge">multiLine</code> option to <code class="highlighter-rouge">true</code>.</p>
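<p>A minimal sketch of this, assuming a hypothetical multi-line file at <code class="highlighter-rouge">examples/src/main/resources/people_multiline.json</code> (the path is illustrative; <code class="highlighter-rouge">option()</code> is the standard <code class="highlighter-rouge">DataFrameReader</code> API):</p>
<figure class="highlight"><pre><code class="language-java" data-lang="java">// Hypothetical file containing a single JSON document spread over multiple lines
String multiLinePath = "examples/src/main/resources/people_multiline.json";

// With multiLine enabled, Spark parses each input file as one JSON document
// instead of one document per line
Dataset&lt;Row&gt; multiLineDF = spark.read().option("multiLine", true).json(multiLinePath);
multiLineDF.printSchema();</code></pre></figure>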
<p><span class="kn">import</span> <span class="nn">org.apache.spark.sql.Dataset</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.Row</span><span class="o">;</span></p>
<p><span class="c1">// A JSON dataset is pointed to by path.</span>
<span class="c1">// The path can be either a single text file or a directory storing text files</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">people</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="na">read</span><span class="o">().</span><span class="na">json</span><span class="o">(</span><span class="s">&#8220;examples/src/main/resources/people.json&#8221;</span><span class="o">);</span></p>
<p><span class="c1">// The inferred schema can be visualized using the printSchema() method</span>
<span class="n">people</span><span class="o">.</span><span class="na">printSchema</span><span class="o">();</span>
<span class="c1">// root</span>
<span class="c1">// |&#8211; age: long (nullable = true)</span>
<span class="c1">// |&#8211; name: string (nullable = true)</span></p>
<p><span class="c1">// Creates a temporary view using the DataFrame</span>
<span class="n">people</span><span class="o">.</span><span class="na">createOrReplaceTempView</span><span class="o">(</span><span class="s">&#8220;people&#8221;</span><span class="o">);</span></p>
<p><span class="c1">// SQL statements can be run by using the sql methods provided by spark</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">namesDF</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="na">sql</span><span class="o">(</span><span class="s">&#8220;SELECT name FROM people WHERE age BETWEEN 13 AND 19&#8221;</span><span class="o">);</span>
<span class="n">namesDF</span><span class="o">.</span><span class="na">show</span><span class="o">();</span>
<span class="c1">// +&#8212;&#8212;+</span>
<span class="c1">// | name|</span>
<span class="c1">// +&#8212;&#8212;+</span>
<span class="c1">// |Justin|</span>
<span class="c1">// +&#8212;&#8212;+</span></p>
<p><span class="c1">// Alternatively, a DataFrame can be created for a JSON dataset represented by</span>
<span class="c1">// a Dataset&lt;String&gt; storing one JSON object per string.</span>
<span class="nc">List</span><span class="o">&lt;</span><span class="nc">String</span><span class="o">&gt;</span> <span class="n">jsonData</span> <span class="o">=</span> <span class="nc">Arrays</span><span class="o">.</span><span class="na">asList</span><span class="o">(</span>
<span class="s">&#8221;{"name":"Yin","address":{"city":"Columbus","state":"Ohio"}}&#8221;</span><span class="o">);</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">String</span><span class="o">&gt;</span> <span class="n">anotherPeopleDataset</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="na">createDataset</span><span class="o">(</span><span class="n">jsonData</span><span class="o">,</span> <span class="nc">Encoders</span><span class="o">.</span><span class="na">STRING</span><span class="o">());</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">anotherPeople</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="na">read</span><span class="o">().</span><span class="na">json</span><span class="o">(</span><span class="n">anotherPeopleDataset</span><span class="o">);</span>
<span class="n">anotherPeople</span><span class="o">.</span><span class="na">show</span><span class="o">();</span>
<span class="c1">// +&#8212;&#8212;&#8212;&#8212;&#8212;+&#8212;-+</span>
<span class="c1">// | address|name|</span>
<span class="c1">// +&#8212;&#8212;&#8212;&#8212;&#8212;+&#8212;-+</span>
<span class="c1">// |[Columbus,Ohio]| Yin|</span>
<span class="c1">// +&#8212;&#8212;&#8212;&#8212;&#8212;+&#8212;-+</span></p>
<div><small>Find full example code at "examples/src/main/java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java" in the Spark repo.</small></div>
</div>
<div data-lang="python">
<p>Spark SQL can automatically infer the schema of a JSON dataset and load it as a DataFrame.
This conversion can be done using <code class="highlighter-rouge">SparkSession.read.json</code> on a JSON file.</p>
<p>Note that the file that is offered as <em>a json file</em> is not a typical JSON file. Each
line must contain a separate, self-contained valid JSON object. For more information, please see
<a href="http://jsonlines.org/">JSON Lines text format, also called newline-delimited JSON</a>.</p>
<p>For a regular multi-line JSON file, set the <code class="highlighter-rouge">multiLine</code> parameter to <code class="highlighter-rouge">True</code>.</p>
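<p>A minimal sketch of this, assuming a hypothetical multi-line file at <code class="highlighter-rouge">examples/src/main/resources/people_multiline.json</code> (the path is illustrative; <code class="highlighter-rouge">multiLine</code> is a keyword argument of <code class="highlighter-rouge">DataFrameReader.json</code>):</p>
<figure class="highlight"><pre><code class="language-python" data-lang="python"># Hypothetical file containing a single JSON document spread over multiple lines
multiline_path = "examples/src/main/resources/people_multiline.json"

# With multiLine enabled, Spark parses each input file as one JSON document
# instead of one document per line
multiline_df = spark.read.json(multiline_path, multiLine=True)
multiline_df.printSchema()</code></pre></figure>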
<p><span class="c1"># spark is from the previous example.
</span><span class="n">sc</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="n">sparkContext</span></p>
<p><span class="c1"># A JSON dataset is pointed to by path.</span></p>
<h1 id="the-path-can-be-either-a-single-text-file-or-a-directory-storing-text-files">The path can be either a single text file or a directory storing text files</h1>
<p>&lt;/span&gt;<span class="n">path</span> <span class="o">=</span> <span class="s">&#8220;examples/src/main/resources/people.json&#8221;</span>
<span class="n">peopleDF</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="n">read</span><span class="o">.</span><span class="n">json</span><span class="p">(</span><span class="n">path</span><span class="p">)</span></p>
<p><span class="c1"># The inferred schema can be visualized using the printSchema() method
</span><span class="n">peopleDF</span><span class="o">.</span><span class="n">printSchema</span><span class="p">()</span>
<span class="c1"># root</span></p>
<h1 id="age-long-nullable--true">|&#8211; age: long (nullable = true)</h1>
<h1 id="name-string-nullable--true">|&#8211; name: string (nullable = true)</h1>
<p>&lt;/span&gt;
<span class="c1"># Creates a temporary view using the DataFrame
</span><span class="n">peopleDF</span><span class="o">.</span><span class="n">createOrReplaceTempView</span><span class="p">(</span><span class="s">&#8220;people&#8221;</span><span class="p">)</span></p>
<p><span class="c1"># SQL statements can be run by using the sql methods provided by spark
</span><span class="n">teenagerNamesDF</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="n">sql</span><span class="p">(</span><span class="s">&#8220;SELECT name FROM people WHERE age BETWEEN 13 AND 19&#8221;</span><span class="p">)</span>
<span class="n">teenagerNamesDF</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
<span class="c1"># +&#8212;&#8212;+</span></p>
<h1 id="name">| name|</h1>
<h1 id="section">+&#8212;&#8212;+</h1>
<h1 id="justin">|Justin|</h1>
<h1 id="section-1">+&#8212;&#8212;+</h1>
<p>&lt;/span&gt;
<span class="c1"># Alternatively, a DataFrame can be created for a JSON dataset represented by</span></p>
<h1 id="an-rddstring-storing-one-json-object-per-string">an RDD[String] storing one JSON object per string</h1>
<p>&lt;/span&gt;<span class="n">jsonStrings</span> <span class="o">=</span> <span class="p">[</span><span class="s">&#8217;{&#8220;name&#8221;:&#8221;Yin&#8221;,&#8221;address&#8221;:{&#8220;city&#8221;:&#8221;Columbus&#8221;,&#8221;state&#8221;:&#8221;Ohio&#8221;}}&#8217;</span><span class="p">]</span>
<span class="n">otherPeopleRDD</span> <span class="o">=</span> <span class="n">sc</span><span class="o">.</span><span class="n">parallelize</span><span class="p">(</span><span class="n">jsonStrings</span><span class="p">)</span>
<span class="n">otherPeople</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="n">read</span><span class="o">.</span><span class="n">json</span><span class="p">(</span><span class="n">otherPeopleRDD</span><span class="p">)</span>
<span class="n">otherPeople</span><span class="o">.</span><span class="n">show</span><span class="p">()</span>
<span class="c1"># +&#8212;&#8212;&#8212;&#8212;&#8212;+&#8212;-+</span></p>
<h1 id="addressname">| address|name|</h1>
<h1 id="section-2">+&#8212;&#8212;&#8212;&#8212;&#8212;+&#8212;-+</h1>
<h1 id="columbusohio-yin">|[Columbus,Ohio]| Yin|</h1>
<h1 id="section-3">+&#8212;&#8212;&#8212;&#8212;&#8212;+&#8212;-+</h1>
<p>&lt;/span&gt;</p>
<div><small>Find full example code at "examples/src/main/python/sql/datasource.py" in the Spark repo.</small></div>
</div>
<div data-lang="r">
<p>Spark SQL can automatically infer the schema of a JSON dataset and load it as a DataFrame using
the <code class="highlighter-rouge">read.json()</code> function, which loads data from a directory of JSON files where each line of the
files is a JSON object.</p>
<p>Note that the file that is offered as <em>a json file</em> is not a typical JSON file. Each
line must contain a separate, self-contained valid JSON object. For more information, please see
<a href="http://jsonlines.org/">JSON Lines text format, also called newline-delimited JSON</a>.</p>
<p>For a regular multi-line JSON file, set a named parameter <code class="highlighter-rouge">multiLine</code> to <code class="highlighter-rouge">TRUE</code>.</p>
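<p>A minimal sketch of this, assuming a hypothetical multi-line file at <code class="highlighter-rouge">examples/src/main/resources/people_multiline.json</code> (the path is illustrative; additional named options are passed through to the data source by <code class="highlighter-rouge">read.json()</code>):</p>
<figure class="highlight"><pre><code class="language-r" data-lang="r"># Hypothetical file containing a single JSON document spread over multiple lines
multiLinePath &lt;- "examples/src/main/resources/people_multiline.json"

# With multiLine enabled, Spark parses each input file as one JSON document
# instead of one document per line
multiLinePeople &lt;- read.json(multiLinePath, multiLine = TRUE)
printSchema(multiLinePeople)</code></pre></figure>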
<p><span class="c1"># A JSON dataset is pointed to by path.</span><span class="w">
</span><span class="c1"># The path can be either a single text file or a directory storing text files.</span><span class="w">
</span><span class="n">path</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="s2">&#8220;examples/src/main/resources/people.json&#8221;</span><span class="w">
</span><span class="c1"># Create a DataFrame from the file(s) pointed to by path</span><span class="w">
</span><span class="n">people</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">read.json</span><span class="p">(</span><span class="n">path</span><span class="p">)</span><span class="w"></span></p>
<p>&lt;/span&gt;<span class="c1"># The inferred schema can be visualized using the printSchema() method.</span><span class="w">
</span><span class="n">printSchema</span><span class="p">(</span><span class="n">people</span><span class="p">)</span><span class="w">
</span><span class="c1">## root</span><span class="w">
</span><span class="c1">## |&#8211; age: long (nullable = true)</span><span class="w">
</span><span class="c1">## |&#8211; name: string (nullable = true)</span><span class="w"></span></p>
<p>&lt;/span&gt;<span class="c1"># Register this DataFrame as a table.</span><span class="w">
</span><span class="n">createOrReplaceTempView</span><span class="p">(</span><span class="n">people</span><span class="p">,</span><span class="w"> </span><span class="s2">&#8220;people&#8221;</span><span class="p">)</span><span class="w"></span></p>
<p>&lt;/span&gt;<span class="c1"># SQL statements can be run by using the sql methods.</span><span class="w">
</span><span class="n">teenagers</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">sql</span><span class="p">(</span><span class="s2">&#8220;SELECT name FROM people WHERE age &gt;= 13 AND age &lt;= 19&#8221;</span><span class="p">)</span><span class="w">
</span><span class="n">head</span><span class="p">(</span><span class="n">teenagers</span><span class="p">)</span><span class="w">
</span><span class="c1">## name</span><span class="w">
</span><span class="c1">## 1 Justin</span><span class="w"></span></p>
<p>&lt;/span&gt;&lt;div&gt;<small>Find full example code at &#8220;examples/src/main/r/RSparkSQLExample.R&#8221; in the Spark repo.</small>&lt;/div&gt;</p>
</div>
<div data-lang="sql">
<figure class="highlight"><pre><code class="language-sql" data-lang="sql"><span class="k">CREATE</span> <span class="k">TEMPORARY</span> <span class="k">VIEW</span> <span class="n">jsonTable</span>
<span class="k">USING</span> <span class="n">org</span><span class="p">.</span><span class="n">apache</span><span class="p">.</span><span class="n">spark</span><span class="p">.</span><span class="k">sql</span><span class="p">.</span><span class="n">json</span>
<span class="k">OPTIONS</span> <span class="p">(</span>
<span class="n">path</span> <span class="nv">"examples/src/main/resources/people.json"</span>
<span class="p">)</span>
<span class="k">SELECT</span> <span class="o">*</span> <span class="k">FROM</span> <span class="n">jsonTable</span></code></pre></figure>
</div>
</div>
</div>
<!-- /container -->
</div>
<script src="js/vendor/jquery-3.4.1.min.js"></script>
<script src="js/vendor/bootstrap.min.js"></script>
<script src="js/vendor/anchor.min.js"></script>
<script src="js/main.js"></script>
<!-- MathJax Section -->
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
TeX: { equationNumbers: { autoNumber: "AMS" } }
});
</script>
<script>
// Note that we load MathJax this way to work with local file (file://), HTTP and HTTPS.
// We could use "//cdn.mathjax...", but that won't support "file://".
(function(d, script) {
script = d.createElement('script');
script.type = 'text/javascript';
script.async = true;
script.onload = function(){
MathJax.Hub.Config({
tex2jax: {
inlineMath: [ ["$", "$"], ["\\\\(","\\\\)"] ],
displayMath: [ ["$$","$$"], ["\\[", "\\]"] ],
processEscapes: true,
skipTags: ['script', 'noscript', 'style', 'textarea', 'pre']
}
});
};
script.src = ('https:' == document.location.protocol ? 'https://' : 'http://') +
'cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js' +
'?config=TeX-AMS-MML_HTMLorMML';
d.getElementsByTagName('head')[0].appendChild(script);
}(document));
</script>
</body>
</html>