<!DOCTYPE html>
<!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7"> <![endif]-->
<!--[if IE 7]> <html class="no-js lt-ie9 lt-ie8"> <![endif]-->
<!--[if IE 8]> <html class="no-js lt-ie9"> <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js"> <!--<![endif]-->
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title>ORC Files - Spark 3.2.4 Documentation</title>
<link rel="stylesheet" href="css/bootstrap.min.css">
<style>
body {
padding-top: 60px;
padding-bottom: 40px;
}
</style>
<meta name="viewport" content="width=device-width">
<link rel="stylesheet" href="css/main.css">
<script src="js/vendor/modernizr-2.6.1-respond-1.1.0.min.js"></script>
<link rel="stylesheet" href="css/pygments-default.css">
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.css" />
<link rel="stylesheet" href="css/docsearch.css">
<!-- Matomo -->
<script type="text/javascript">
var _paq = window._paq = window._paq || [];
/* tracker methods like "setCustomDimension" should be called before "trackPageView" */
_paq.push(["disableCookies"]);
_paq.push(['trackPageView']);
_paq.push(['enableLinkTracking']);
(function() {
var u="https://analytics.apache.org/";
_paq.push(['setTrackerUrl', u+'matomo.php']);
_paq.push(['setSiteId', '40']);
var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
g.async=true; g.src=u+'matomo.js'; s.parentNode.insertBefore(g,s);
})();
</script>
<!-- End Matomo Code -->
</head>
<body>
<!--[if lt IE 7]>
<p class="chromeframe">You are using an outdated browser. <a href="https://browsehappy.com/">Upgrade your browser today</a> or <a href="http://www.google.com/chromeframe/?redirect=true">install Google Chrome Frame</a> to better experience this site.</p>
<![endif]-->
<!-- This code is taken from http://twitter.github.com/bootstrap/examples/hero.html -->
<nav class="navbar fixed-top navbar-expand-md navbar-light bg-light" id="topbar">
<div class="container">
<div class="navbar-header">
<div class="navbar-brand"><a href="index.html">
<img src="img/spark-logo-hd.png" style="height:50px;"/></a><span class="version">3.2.4</span>
</div>
</div>
<button class="navbar-toggler" type="button" data-toggle="collapse"
data-target="#navbarCollapse" aria-controls="navbarCollapse"
aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarCollapse">
<ul class="navbar-nav">
<!--TODO(andyk): Add class="active" attribute to li some how.-->
<li class="nav-item"><a href="index.html" class="nav-link">Overview</a></li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarQuickStart" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Programming Guides</a>
<div class="dropdown-menu" aria-labelledby="navbarQuickStart">
<a class="dropdown-item" href="quick-start.html">Quick Start</a>
<a class="dropdown-item" href="rdd-programming-guide.html">RDDs, Accumulators, Broadcasts Vars</a>
<a class="dropdown-item" href="sql-programming-guide.html">SQL, DataFrames, and Datasets</a>
<a class="dropdown-item" href="structured-streaming-programming-guide.html">Structured Streaming</a>
<a class="dropdown-item" href="streaming-programming-guide.html">Spark Streaming (DStreams)</a>
<a class="dropdown-item" href="ml-guide.html">MLlib (Machine Learning)</a>
<a class="dropdown-item" href="graphx-programming-guide.html">GraphX (Graph Processing)</a>
<a class="dropdown-item" href="sparkr.html">SparkR (R on Spark)</a>
<a class="dropdown-item" href="api/python/getting_started/index.html">PySpark (Python on Spark)</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarAPIDocs" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">API Docs</a>
<div class="dropdown-menu" aria-labelledby="navbarAPIDocs">
<a class="dropdown-item" href="api/scala/org/apache/spark/index.html">Scala</a>
<a class="dropdown-item" href="api/java/index.html">Java</a>
<a class="dropdown-item" href="api/python/index.html">Python</a>
<a class="dropdown-item" href="api/R/index.html">R</a>
<a class="dropdown-item" href="api/sql/index.html">SQL, Built-in Functions</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarDeploying" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Deploying</a>
<div class="dropdown-menu" aria-labelledby="navbarDeploying">
<a class="dropdown-item" href="cluster-overview.html">Overview</a>
<a class="dropdown-item" href="submitting-applications.html">Submitting Applications</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="spark-standalone.html">Spark Standalone</a>
<a class="dropdown-item" href="running-on-mesos.html">Mesos</a>
<a class="dropdown-item" href="running-on-yarn.html">YARN</a>
<a class="dropdown-item" href="running-on-kubernetes.html">Kubernetes</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarMore" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">More</a>
<div class="dropdown-menu" aria-labelledby="navbarMore">
<a class="dropdown-item" href="configuration.html">Configuration</a>
<a class="dropdown-item" href="monitoring.html">Monitoring</a>
<a class="dropdown-item" href="tuning.html">Tuning Guide</a>
<a class="dropdown-item" href="job-scheduling.html">Job Scheduling</a>
<a class="dropdown-item" href="security.html">Security</a>
<a class="dropdown-item" href="hardware-provisioning.html">Hardware Provisioning</a>
<a class="dropdown-item" href="migration-guide.html">Migration Guide</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="building-spark.html">Building Spark</a>
<a class="dropdown-item" href="https://spark.apache.org/contributing.html">Contributing to Spark</a>
<a class="dropdown-item" href="https://spark.apache.org/third-party-projects.html">Third Party Projects</a>
</div>
</li>
<li class="nav-item">
<input type="text" id="docsearch-input" placeholder="Search the docs…">
</li>
</ul>
<!--<span class="navbar-text navbar-right"><span class="version-text">v3.2.4</span></span>-->
</div>
</div>
</nav>
<div class="container-wrapper">
<div class="left-menu-wrapper">
<div class="left-menu">
<h3><a href="sql-programming-guide.html">Spark SQL Guide</a></h3>
<ul>
<li>
<a href="sql-getting-started.html">
Getting Started
</a>
</li>
<li>
<a href="sql-data-sources.html">
Data Sources
</a>
</li>
<ul>
<li>
<a href="sql-data-sources-load-save-functions.html">
Generic Load/Save Functions
</a>
</li>
<li>
<a href="sql-data-sources-generic-options.html">
Generic File Source Options
</a>
</li>
<li>
<a href="sql-data-sources-parquet.html">
Parquet Files
</a>
</li>
<li>
<a href="sql-data-sources-orc.html">
<b>ORC Files</b>
</a>
</li>
<li>
<a href="sql-data-sources-json.html">
JSON Files
</a>
</li>
<li>
<a href="sql-data-sources-csv.html">
CSV Files
</a>
</li>
<li>
<a href="sql-data-sources-text.html">
Text Files
</a>
</li>
<li>
<a href="sql-data-sources-hive-tables.html">
Hive Tables
</a>
</li>
<li>
<a href="sql-data-sources-jdbc.html">
JDBC To Other Databases
</a>
</li>
<li>
<a href="sql-data-sources-avro.html">
Avro Files
</a>
</li>
<li>
<a href="sql-data-sources-binaryFile.html">
Whole Binary Files
</a>
</li>
<li>
<a href="sql-data-sources-troubleshooting.html">
Troubleshooting
</a>
</li>
</ul>
<li>
<a href="sql-performance-tuning.html">
Performance Tuning
</a>
</li>
<li>
<a href="sql-distributed-sql-engine.html">
Distributed SQL Engine
</a>
</li>
<li>
<a href="sql-pyspark-pandas-with-arrow.html">
PySpark Usage Guide for Pandas with Apache Arrow
</a>
</li>
<li>
<a href="sql-migration-old.html">
Migration Guide
</a>
</li>
<li>
<a href="sql-ref.html">
SQL Reference
</a>
</li>
</ul>
</div>
</div>
<input id="nav-trigger" class="nav-trigger" checked type="checkbox">
<label for="nav-trigger"></label>
<div class="content-with-sidebar mr-3" id="content">
<h1 class="title">ORC Files</h1>
<ul id="markdown-toc">
<li><a href="#orc-implementation" id="markdown-toc-orc-implementation">ORC Implementation</a></li>
<li><a href="#vectorized-reader" id="markdown-toc-vectorized-reader">Vectorized Reader</a></li>
<li><a href="#schema-merging" id="markdown-toc-schema-merging">Schema Merging</a></li>
<li><a href="#zstandard" id="markdown-toc-zstandard">Zstandard</a></li>
<li><a href="#bloom-filters" id="markdown-toc-bloom-filters">Bloom Filters</a></li>
<li><a href="#columnar-encryption" id="markdown-toc-columnar-encryption">Columnar Encryption</a></li>
<li><a href="#hive-metastore-orc-table-conversion" id="markdown-toc-hive-metastore-orc-table-conversion">Hive metastore ORC table conversion</a></li>
<li><a href="#configuration" id="markdown-toc-configuration">Configuration</a></li>
<li><a href="#data-source-option" id="markdown-toc-data-source-option">Data Source Option</a></li>
</ul>
<p><a href="https://orc.apache.org">Apache ORC</a> is a columnar format which has more advanced features like native zstd compression, bloom filter and columnar encryption.</p>
<h3 id="orc-implementation">ORC Implementation</h3>
<p>Spark supports two ORC implementations (<code class="language-plaintext highlighter-rouge">native</code> and <code class="language-plaintext highlighter-rouge">hive</code>), controlled by <code class="language-plaintext highlighter-rouge">spark.sql.orc.impl</code>.
The two implementations share most functionality but have different design goals.</p>
<ul>
<li><code class="language-plaintext highlighter-rouge">native</code> implementation is designed to follow Spark&#8217;s data source behavior like <code class="language-plaintext highlighter-rouge">Parquet</code>.</li>
<li><code class="language-plaintext highlighter-rouge">hive</code> implementation is designed to follow Hive&#8217;s behavior and uses Hive SerDe.</li>
</ul>
<p>For example, historically the <code class="language-plaintext highlighter-rouge">native</code> implementation handled <code class="language-plaintext highlighter-rouge">CHAR/VARCHAR</code> with Spark&#8217;s native <code class="language-plaintext highlighter-rouge">String</code> type, while the <code class="language-plaintext highlighter-rouge">hive</code> implementation handled it via Hive&#8217;s <code class="language-plaintext highlighter-rouge">CHAR/VARCHAR</code>, so the query results differed. Since Spark 3.1.0, <a href="https://issues.apache.org/jira/browse/SPARK-33480">SPARK-33480</a> removes this difference by supporting <code class="language-plaintext highlighter-rouge">CHAR/VARCHAR</code> on the Spark side.</p>
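<p>As an illustrative sketch, the implementation can be switched at the session level; the table name <code class="language-plaintext highlighter-rouge">contacts</code> below is hypothetical:</p>
<div class="codetabs">
<div data-lang="SQL">
<figure class="highlight"><pre><code class="language-sql" data-lang="sql">-- Use Spark's native ORC support (the default since Spark 2.3)
SET spark.sql.orc.impl=native;
CREATE TABLE contacts (name STRING, age INT) USING ORC;

-- Or switch to the ORC library shipped with Hive
SET spark.sql.orc.impl=hive;</code></pre></figure>
</div>
</div>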
<h3 id="vectorized-reader">Vectorized Reader</h3>
<p><code class="language-plaintext highlighter-rouge">native</code> implementation supports a vectorized ORC reader and has been the default ORC implementaion since Spark 2.3.
The vectorized reader is used for the native ORC tables (e.g., the ones created using the clause <code class="language-plaintext highlighter-rouge">USING ORC</code>) when <code class="language-plaintext highlighter-rouge">spark.sql.orc.impl</code> is set to <code class="language-plaintext highlighter-rouge">native</code> and <code class="language-plaintext highlighter-rouge">spark.sql.orc.enableVectorizedReader</code> is set to <code class="language-plaintext highlighter-rouge">true</code>.
For nested data types (array, map and struct), vectorized reader is disabled by default. Set <code class="language-plaintext highlighter-rouge">spark.sql.orc.enableNestedColumnVectorizedReader</code> to <code class="language-plaintext highlighter-rouge">true</code> to enable vectorized reader for these types.</p>
<p>For Hive ORC SerDe tables (e.g., ones created using the clause <code class="language-plaintext highlighter-rouge">USING HIVE OPTIONS (fileFormat 'ORC')</code>),
the vectorized reader is used when <code class="language-plaintext highlighter-rouge">spark.sql.hive.convertMetastoreOrc</code> is also set to <code class="language-plaintext highlighter-rouge">true</code>; it is turned on by default.</p>
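<p>As a minimal sketch, the relevant settings can be applied per session; the first two values below are already the defaults, while the nested-column reader must be enabled explicitly:</p>
<div class="codetabs">
<div data-lang="SQL">
<figure class="highlight"><pre><code class="language-sql" data-lang="sql">SET spark.sql.orc.impl=native;
SET spark.sql.orc.enableVectorizedReader=true;
-- Needed additionally for nested types (array, map and struct)
SET spark.sql.orc.enableNestedColumnVectorizedReader=true;</code></pre></figure>
</div>
</div>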
<h3 id="schema-merging">Schema Merging</h3>
<p>Like Protocol Buffer, Avro, and Thrift, ORC also supports schema evolution. Users can start with
a simple schema, and gradually add more columns to the schema as needed. In this way, users may end
up with multiple ORC files with different but mutually compatible schemas. The ORC data
source can automatically detect this case and merge the schemas of all these files.</p>
<p>Since schema merging is a relatively expensive operation and is not a necessity in most cases, it is
turned off by default. You may enable it (see the SQL sketch after this list) by</p>
<ol>
<li>setting data source option <code class="language-plaintext highlighter-rouge">mergeSchema</code> to <code class="language-plaintext highlighter-rouge">true</code> when reading ORC files, or</li>
<li>setting the global SQL option <code class="language-plaintext highlighter-rouge">spark.sql.orc.mergeSchema</code> to <code class="language-plaintext highlighter-rouge">true</code>.</li>
</ol>
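<p>A short sketch of both approaches; the path <code class="language-plaintext highlighter-rouge">/data/orc/events</code> and the table name <code class="language-plaintext highlighter-rouge">merged</code> are illustrative:</p>
<div class="codetabs">
<div data-lang="SQL">
<figure class="highlight"><pre><code class="language-sql" data-lang="sql">-- 1. Per data source, via the OPTIONS clause
CREATE TABLE merged
USING ORC
OPTIONS (path '/data/orc/events', mergeSchema 'true');

-- 2. Globally, for the whole session
SET spark.sql.orc.mergeSchema=true;</code></pre></figure>
</div>
</div>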
<h3 id="zstandard">Zstandard</h3>
<p>Spark supports both Hadoop 2 and Hadoop 3. Since Spark 3.2, you can take advantage
of Zstandard compression in ORC files on both Hadoop versions.
See <a href="https://facebook.github.io/zstd/">Zstandard</a> for its benefits.</p>
<div class="codetabs">
<div data-lang="SQL">
<figure class="highlight"><pre><code class="language-sql" data-lang="sql"><span class="k">CREATE</span> <span class="k">TABLE</span> <span class="n">compressed</span> <span class="p">(</span>
<span class="k">key</span> <span class="n">STRING</span><span class="p">,</span>
<span class="n">value</span> <span class="n">STRING</span>
<span class="p">)</span>
<span class="k">USING</span> <span class="n">ORC</span>
<span class="k">OPTIONS</span> <span class="p">(</span>
<span class="n">compression</span> <span class="s1">'zstd'</span>
<span class="p">)</span></code></pre></figure>
</div>
</div>
<h3 id="bloom-filters">Bloom Filters</h3>
<p>You can control bloom filters and dictionary encoding for ORC data sources. The following ORC example creates a bloom filter for <code class="language-plaintext highlighter-rouge">favorite_color</code> and uses dictionary encoding only for that column. For more detailed information about the extra ORC options, visit the official Apache ORC website.</p>
<div class="codetabs">
<div data-lang="SQL">
<figure class="highlight"><pre><code class="language-sql" data-lang="sql"><span class="k">CREATE</span> <span class="k">TABLE</span> <span class="n">users_with_options</span> <span class="p">(</span>
<span class="n">name</span> <span class="n">STRING</span><span class="p">,</span>
<span class="n">favorite_color</span> <span class="n">STRING</span><span class="p">,</span>
<span class="n">favorite_numbers</span> <span class="n">array</span><span class="o">&lt;</span><span class="nb">integer</span><span class="o">&gt;</span>
<span class="p">)</span>
<span class="k">USING</span> <span class="n">ORC</span>
<span class="k">OPTIONS</span> <span class="p">(</span>
<span class="n">orc</span><span class="p">.</span><span class="n">bloom</span><span class="p">.</span><span class="n">filter</span><span class="p">.</span><span class="n">columns</span> <span class="s1">'favorite_color'</span><span class="p">,</span>
<span class="n">orc</span><span class="p">.</span><span class="k">dictionary</span><span class="p">.</span><span class="k">key</span><span class="p">.</span><span class="n">threshold</span> <span class="s1">'1.0'</span><span class="p">,</span>
<span class="n">orc</span><span class="p">.</span><span class="k">column</span><span class="p">.</span><span class="k">encoding</span><span class="p">.</span><span class="n">direct</span> <span class="s1">'name'</span>
<span class="p">)</span></code></pre></figure>
</div>
</div>
<h3 id="columnar-encryption">Columnar Encryption</h3>
<p>Since Spark 3.2, columnar encryption is supported for ORC tables with Apache ORC 1.6.
The following example uses Hadoop KMS as a key provider at the given location.
Visit <a href="https://hadoop.apache.org/docs/current/hadoop-kms/index.html">Apache Hadoop KMS</a> for the details.</p>
<div class="codetabs">
<div data-lang="SQL">
<figure class="highlight"><pre><code class="language-sql" data-lang="sql"><span class="k">CREATE</span> <span class="k">TABLE</span> <span class="k">encrypted</span> <span class="p">(</span>
<span class="n">ssn</span> <span class="n">STRING</span><span class="p">,</span>
<span class="n">email</span> <span class="n">STRING</span><span class="p">,</span>
<span class="n">name</span> <span class="n">STRING</span>
<span class="p">)</span>
<span class="k">USING</span> <span class="n">ORC</span>
<span class="k">OPTIONS</span> <span class="p">(</span>
<span class="n">hadoop</span><span class="p">.</span><span class="k">security</span><span class="p">.</span><span class="k">key</span><span class="p">.</span><span class="n">provider</span><span class="p">.</span><span class="n">path</span> <span class="nv">"kms://http@localhost:9600/kms"</span><span class="p">,</span>
<span class="n">orc</span><span class="p">.</span><span class="k">key</span><span class="p">.</span><span class="n">provider</span> <span class="nv">"hadoop"</span><span class="p">,</span>
<span class="n">orc</span><span class="p">.</span><span class="n">encrypt</span> <span class="nv">"pii:ssn,email"</span><span class="p">,</span>
<span class="n">orc</span><span class="p">.</span><span class="n">mask</span> <span class="nv">"nullify:ssn;sha256:email"</span>
<span class="p">)</span></code></pre></figure>
</div>
</div>
<h3 id="hive-metastore-orc-table-conversion">Hive metastore ORC table conversion</h3>
<p>When reading from and inserting into Hive metastore ORC tables, Spark SQL will try to use its own ORC support instead of the Hive SerDe for better performance. For CTAS statements, only non-partitioned Hive metastore ORC tables are converted. This behavior is controlled by the <code class="language-plaintext highlighter-rouge">spark.sql.hive.convertMetastoreOrc</code> configuration, and it is turned on by default.</p>
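<p>As an illustrative sketch, the conversion can be toggled per session; the source table <code class="language-plaintext highlighter-rouge">src</code> below is hypothetical:</p>
<div class="codetabs">
<div data-lang="SQL">
<figure class="highlight"><pre><code class="language-sql" data-lang="sql">-- On by default: Spark's own ORC support is used for Hive metastore ORC tables
SET spark.sql.hive.convertMetastoreOrc=true;

-- For CTAS, only non-partitioned Hive metastore ORC tables are converted
CREATE TABLE hive_orc STORED AS ORC AS SELECT * FROM src;

-- Set to false to fall back to the Hive SerDe
SET spark.sql.hive.convertMetastoreOrc=false;</code></pre></figure>
</div>
</div>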
<h3 id="configuration">Configuration</h3>
<table class="table">
<tr><th><b>Property Name</b></th><th><b>Default</b></th><th><b>Meaning</b></th><th><b>Since Version</b></th></tr>
<tr>
<td><code>spark.sql.orc.impl</code></td>
<td><code>native</code></td>
<td>
The name of the ORC implementation. It can be one of <code>native</code> or <code>hive</code>.
<code>native</code> means the native ORC support. <code>hive</code> means the ORC library
in Hive.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.sql.orc.enableVectorizedReader</code></td>
<td><code>true</code></td>
<td>
Enables vectorized ORC decoding in the <code>native</code> implementation. If <code>false</code>,
a non-vectorized ORC reader is used in the <code>native</code> implementation.
This is ignored for the <code>hive</code> implementation.
</td>
<td>2.3.0</td>
</tr>
<tr>
<td><code>spark.sql.orc.enableNestedColumnVectorizedReader</code></td>
<td><code>false</code></td>
<td>
Enables vectorized ORC decoding in the <code>native</code> implementation for nested data types
(array, map and struct). If <code>spark.sql.orc.enableVectorizedReader</code> is set to
<code>false</code>, this is ignored.
</td>
<td>3.2.0</td>
</tr>
<tr>
<td><code>spark.sql.orc.mergeSchema</code></td>
<td>false</td>
<td>
<p>
When true, the ORC data source merges schemas collected from all data files,
otherwise the schema is picked from a random data file.
</p>
</td>
<td>3.0.0</td>
</tr>
<tr>
<td><code>spark.sql.hive.convertMetastoreOrc</code></td>
<td>true</td>
<td>
When set to false, Spark SQL will use the Hive SerDe for ORC tables instead of the built-in
support.
</td>
<td>2.0.0</td>
</tr>
</table>
<h2 id="data-source-option">Data Source Option</h2>
<p>Data source options of ORC can be set via:</p>
<ul>
<li>the <code class="language-plaintext highlighter-rouge">.option</code>/<code class="language-plaintext highlighter-rouge">.options</code> methods of
<ul>
<li><code class="language-plaintext highlighter-rouge">DataFrameReader</code></li>
<li><code class="language-plaintext highlighter-rouge">DataFrameWriter</code></li>
<li><code class="language-plaintext highlighter-rouge">DataStreamReader</code></li>
<li><code class="language-plaintext highlighter-rouge">DataStreamWriter</code></li>
</ul>
</li>
<li><code class="language-plaintext highlighter-rouge">OPTIONS</code> clause at <a href="sql-ref-syntax-ddl-create-table-datasource.html">CREATE TABLE USING DATA_SOURCE</a></li>
</ul>
<table class="table">
<tr><th><b>Property Name</b></th><th><b>Default</b></th><th><b>Meaning</b></th><th><b>Scope</b></th></tr>
<tr>
<td><code>mergeSchema</code></td>
<td><code>false</code></td>
<td>Sets whether schemas collected from all ORC part-files should be merged. Setting this option overrides <code>spark.sql.orc.mergeSchema</code>; when unset, the default value is taken from <code>spark.sql.orc.mergeSchema</code>.</td>
<td>read</td>
</tr>
<tr>
<td><code>compression</code></td>
<td><code>snappy</code></td>
<td>Compression codec to use when saving to file. This can be one of the known case-insensitive shortened names (none, snappy, zlib, lzo, zstd and lz4). This will override <code>orc.compress</code> and <code>spark.sql.orc.compression.codec</code>.</td>
<td>write</td>
</tr>
</table>
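<p>For instance, a minimal sketch passing both of the options above through the <code class="language-plaintext highlighter-rouge">OPTIONS</code> clause; the path and table name are illustrative:</p>
<div class="codetabs">
<div data-lang="SQL">
<figure class="highlight"><pre><code class="language-sql" data-lang="sql">CREATE TABLE users_orc
USING ORC
OPTIONS (
  path '/data/users_orc',
  mergeSchema 'true',   -- read-scope option
  compression 'zstd'    -- write-scope option
);</code></pre></figure>
</div>
</div>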
<p>Other generic options can be found in <a href="https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html">Generic File Source Options</a>.</p>
</div>
<!-- /container -->
</div>
<script src="js/vendor/jquery-3.5.1.min.js"></script>
<script src="js/vendor/bootstrap.bundle.min.js"></script>
<script src="js/vendor/anchor.min.js"></script>
<script src="js/main.js"></script>
<script type="text/javascript" src="https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.js"></script>
<script type="text/javascript">
// DocSearch is entirely free and automated. DocSearch is built in two parts:
// 1. a crawler which we run on our own infrastructure every 24 hours. It follows every link
// in your website and extracts content from every page it traverses. It then pushes this
// content to an Algolia index.
// 2. a JavaScript snippet to be inserted in your website that will bind this Algolia index
// to your search input and display its results in a dropdown UI. If you want to find more
// details on how DocSearch works, check the DocSearch docs.
docsearch({
apiKey: 'd62f962a82bc9abb53471cb7b89da35e',
appId: 'RAI69RXRSK',
indexName: 'apache_spark',
inputSelector: '#docsearch-input',
enhancedSearchInput: true,
algoliaOptions: {
'facetFilters': ["version:3.2.4"]
},
debug: false // Set debug to true if you want to inspect the dropdown
});
</script>
<!-- MathJax Section -->
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
TeX: { equationNumbers: { autoNumber: "AMS" } }
});
</script>
<script>
// Note that we load MathJax this way to work with local file (file://), HTTP and HTTPS.
// We could use "//cdn.mathjax...", but that won't support "file://".
(function(d, script) {
script = d.createElement('script');
script.type = 'text/javascript';
script.async = true;
script.onload = function(){
MathJax.Hub.Config({
tex2jax: {
inlineMath: [ ["$", "$"], ["\\\\(","\\\\)"] ],
displayMath: [ ["$$","$$"], ["\\[", "\\]"] ],
processEscapes: true,
skipTags: ['script', 'noscript', 'style', 'textarea', 'pre']
}
});
};
script.src = ('https:' == document.location.protocol ? 'https://' : 'http://') +
'cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js' +
'?config=TeX-AMS-MML_HTMLorMML';
d.getElementsByTagName('head')[0].appendChild(script);
}(document));
</script>
</body>
</html>