<!DOCTYPE html>
<!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7"> <![endif]-->
<!--[if IE 7]> <html class="no-js lt-ie9 lt-ie8"> <![endif]-->
<!--[if IE 8]> <html class="no-js lt-ie9"> <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js"> <!--<![endif]-->
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title>JDBC To Other Databases - Spark 3.2.4 Documentation</title>
<link rel="stylesheet" href="css/bootstrap.min.css">
<style>
body {
padding-top: 60px;
padding-bottom: 40px;
}
</style>
<meta name="viewport" content="width=device-width">
<link rel="stylesheet" href="css/main.css">
<script src="js/vendor/modernizr-2.6.1-respond-1.1.0.min.js"></script>
<link rel="stylesheet" href="css/pygments-default.css">
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.css" />
<link rel="stylesheet" href="css/docsearch.css">
<!-- Matomo -->
<script type="text/javascript">
var _paq = window._paq = window._paq || [];
/* tracker methods like "setCustomDimension" should be called before "trackPageView" */
_paq.push(["disableCookies"]);
_paq.push(['trackPageView']);
_paq.push(['enableLinkTracking']);
(function() {
var u="https://analytics.apache.org/";
_paq.push(['setTrackerUrl', u+'matomo.php']);
_paq.push(['setSiteId', '40']);
var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
g.async=true; g.src=u+'matomo.js'; s.parentNode.insertBefore(g,s);
})();
</script>
<!-- End Matomo Code -->
</head>
<body>
<!--[if lt IE 7]>
<p class="chromeframe">You are using an outdated browser. <a href="https://browsehappy.com/">Upgrade your browser today</a> or <a href="http://www.google.com/chromeframe/?redirect=true">install Google Chrome Frame</a> to better experience this site.</p>
<![endif]-->
<!-- This code is taken from http://twitter.github.com/bootstrap/examples/hero.html -->
<nav class="navbar fixed-top navbar-expand-md navbar-light bg-light" id="topbar">
<div class="container">
<div class="navbar-header">
<div class="navbar-brand"><a href="index.html">
<img src="img/spark-logo-hd.png" style="height:50px;"/></a><span class="version">3.2.4</span>
</div>
</div>
<button class="navbar-toggler" type="button" data-toggle="collapse"
data-target="#navbarCollapse" aria-controls="navbarCollapse"
aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarCollapse">
<ul class="navbar-nav">
<!--TODO(andyk): Add class="active" attribute to li some how.-->
<li class="nav-item"><a href="index.html" class="nav-link">Overview</a></li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarQuickStart" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Programming Guides</a>
<div class="dropdown-menu" aria-labelledby="navbarQuickStart">
<a class="dropdown-item" href="quick-start.html">Quick Start</a>
<a class="dropdown-item" href="rdd-programming-guide.html">RDDs, Accumulators, Broadcasts Vars</a>
<a class="dropdown-item" href="sql-programming-guide.html">SQL, DataFrames, and Datasets</a>
<a class="dropdown-item" href="structured-streaming-programming-guide.html">Structured Streaming</a>
<a class="dropdown-item" href="streaming-programming-guide.html">Spark Streaming (DStreams)</a>
<a class="dropdown-item" href="ml-guide.html">MLlib (Machine Learning)</a>
<a class="dropdown-item" href="graphx-programming-guide.html">GraphX (Graph Processing)</a>
<a class="dropdown-item" href="sparkr.html">SparkR (R on Spark)</a>
<a class="dropdown-item" href="api/python/getting_started/index.html">PySpark (Python on Spark)</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarAPIDocs" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">API Docs</a>
<div class="dropdown-menu" aria-labelledby="navbarAPIDocs">
<a class="dropdown-item" href="api/scala/org/apache/spark/index.html">Scala</a>
<a class="dropdown-item" href="api/java/index.html">Java</a>
<a class="dropdown-item" href="api/python/index.html">Python</a>
<a class="dropdown-item" href="api/R/index.html">R</a>
<a class="dropdown-item" href="api/sql/index.html">SQL, Built-in Functions</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarDeploying" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Deploying</a>
<div class="dropdown-menu" aria-labelledby="navbarDeploying">
<a class="dropdown-item" href="cluster-overview.html">Overview</a>
<a class="dropdown-item" href="submitting-applications.html">Submitting Applications</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="spark-standalone.html">Spark Standalone</a>
<a class="dropdown-item" href="running-on-mesos.html">Mesos</a>
<a class="dropdown-item" href="running-on-yarn.html">YARN</a>
<a class="dropdown-item" href="running-on-kubernetes.html">Kubernetes</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarMore" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">More</a>
<div class="dropdown-menu" aria-labelledby="navbarMore">
<a class="dropdown-item" href="configuration.html">Configuration</a>
<a class="dropdown-item" href="monitoring.html">Monitoring</a>
<a class="dropdown-item" href="tuning.html">Tuning Guide</a>
<a class="dropdown-item" href="job-scheduling.html">Job Scheduling</a>
<a class="dropdown-item" href="security.html">Security</a>
<a class="dropdown-item" href="hardware-provisioning.html">Hardware Provisioning</a>
<a class="dropdown-item" href="migration-guide.html">Migration Guide</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="building-spark.html">Building Spark</a>
<a class="dropdown-item" href="https://spark.apache.org/contributing.html">Contributing to Spark</a>
<a class="dropdown-item" href="https://spark.apache.org/third-party-projects.html">Third Party Projects</a>
</div>
</li>
<li class="nav-item">
<input type="text" id="docsearch-input" placeholder="Search the docs…">
</li>
</ul>
<!--<span class="navbar-text navbar-right"><span class="version-text">v3.2.4</span></span>-->
</div>
</div>
</nav>
<div class="container-wrapper">
<div class="left-menu-wrapper">
<div class="left-menu">
<h3><a href="sql-programming-guide.html">Spark SQL Guide</a></h3>
<ul>
<li>
<a href="sql-getting-started.html">
Getting Started
</a>
</li>
<li>
<a href="sql-data-sources.html">
Data Sources
</a>
</li>
<ul>
<li>
<a href="sql-data-sources-load-save-functions.html">
Generic Load/Save Functions
</a>
</li>
<li>
<a href="sql-data-sources-generic-options.html">
Generic File Source Options
</a>
</li>
<li>
<a href="sql-data-sources-parquet.html">
Parquet Files
</a>
</li>
<li>
<a href="sql-data-sources-orc.html">
ORC Files
</a>
</li>
<li>
<a href="sql-data-sources-json.html">
JSON Files
</a>
</li>
<li>
<a href="sql-data-sources-csv.html">
CSV Files
</a>
</li>
<li>
<a href="sql-data-sources-text.html">
Text Files
</a>
</li>
<li>
<a href="sql-data-sources-hive-tables.html">
Hive Tables
</a>
</li>
<li>
<a href="sql-data-sources-jdbc.html">
<b>JDBC To Other Databases</b>
</a>
</li>
<li>
<a href="sql-data-sources-avro.html">
Avro Files
</a>
</li>
<li>
<a href="sql-data-sources-binaryFile.html">
Whole Binary Files
</a>
</li>
<li>
<a href="sql-data-sources-troubleshooting.html">
Troubleshooting
</a>
</li>
</ul>
<li>
<a href="sql-performance-tuning.html">
Performance Tuning
</a>
</li>
<li>
<a href="sql-distributed-sql-engine.html">
Distributed SQL Engine
</a>
</li>
<li>
<a href="sql-pyspark-pandas-with-arrow.html">
PySpark Usage Guide for Pandas with Apache Arrow
</a>
</li>
<li>
<a href="sql-migration-old.html">
Migration Guide
</a>
</li>
<li>
<a href="sql-ref.html">
SQL Reference
</a>
</li>
</ul>
</div>
</div>
<input id="nav-trigger" class="nav-trigger" checked type="checkbox">
<label for="nav-trigger"></label>
<div class="content-with-sidebar mr-3" id="content">
<h1 class="title">JDBC To Other Databases</h1>
<ul id="markdown-toc">
<li><a href="#data-source-option" id="markdown-toc-data-source-option">Data Source Option</a></li>
</ul>
<p>Spark SQL also includes a data source that can read data from other databases using JDBC. This
functionality should be preferred over using <a href="api/scala/org/apache/spark/rdd/JdbcRDD.html">JdbcRDD</a>,
because the results are returned
as a DataFrame, which can easily be processed in Spark SQL or joined with other data sources.
The JDBC data source is also easier to use from Java or Python as it does not require the user to
provide a ClassTag.
(Note that this is different from the Spark SQL JDBC server, which allows other applications to
run queries using Spark SQL.)</p>
<p>To get started you will need to include the JDBC driver for your particular database on the
Spark classpath. For example, to connect to PostgreSQL from the Spark shell you would run the
following command:</p>
<figure class="highlight"><pre><code class="language-bash" data-lang="bash">./bin/spark-shell <span class="nt">--driver-class-path</span> postgresql-9.4.1207.jar <span class="nt">--jars</span> postgresql-9.4.1207.jar</code></pre></figure>
<h2 id="data-source-option">Data Source Option</h2>
<p>Spark supports the following case-insensitive options for JDBC. The data source options for JDBC can be set via:</p>
<ul>
<li>the <code class="language-plaintext highlighter-rouge">.option</code>/<code class="language-plaintext highlighter-rouge">.options</code> methods of
<ul>
<li><code class="language-plaintext highlighter-rouge">DataFrameReader</code></li>
<li><code class="language-plaintext highlighter-rouge">DataFrameWriter</code></li>
</ul>
</li>
<li><code class="language-plaintext highlighter-rouge">OPTIONS</code> clause at <a href="sql-ref-syntax-ddl-create-table-datasource.html">CREATE TABLE USING DATA_SOURCE</a></li>
</ul>
<p>JDBC connection properties can likewise be specified in the data source options;
<code>user</code> and <code>password</code> are normally provided as connection properties for
logging into the data source.</p>
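<p>As a quick sketch (the URL, table name, and credentials below are placeholders), the same read can be
expressed either with repeated <code>.option</code> calls or with a single <code>.options</code> map:</p>
<figure class="highlight"><pre><code class="language-scala" data-lang="scala">// Passing JDBC data source options one at a time...
val df = spark.read
  .format("jdbc")
  .option("url", "jdbc:postgresql://dbserver/test")
  .option("dbtable", "schema.tablename")
  .option("user", "username")
  .option("password", "password")
  .load()

// ...or all at once as a map, including the connection properties.
val df2 = spark.read
  .format("jdbc")
  .options(Map(
    "url" -&gt; "jdbc:postgresql://dbserver/test",
    "dbtable" -&gt; "schema.tablename",
    "user" -&gt; "username",
    "password" -&gt; "password"))
  .load()</code></pre></figure>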
<table class="table">
<tr><th><b>Property Name</b></th><th><b>Default</b></th><th><b>Meaning</b></th><th><b>Scope</b></th></tr>
<tr>
<td><code>url</code></td>
<td>(none)</td>
<td>
The JDBC URL of the form <code>jdbc:subprotocol:subname</code> to connect to. Source-specific connection properties may be specified in the URL, e.g., <code>jdbc:postgresql://localhost/test?user=fred&amp;password=secret</code>.
</td>
<td>read/write</td>
</tr>
<tr>
<td><code>dbtable</code></td>
<td>(none)</td>
<td>
The JDBC table that should be read from or written into. Note that when using it in the read
path, anything that is valid in a <code>FROM</code> clause of a SQL query can be used.
For example, instead of a full table you could also use a subquery in parentheses. It is not
allowed to specify the <code>dbtable</code> and <code>query</code> options at the same time.
</td>
<td>read/write</td>
</tr>
<tr>
<td><code>query</code></td>
<td>(none)</td>
<td>
A query that will be used to read data into Spark. The specified query will be parenthesized and used
as a subquery in the <code>FROM</code> clause. Spark will also assign an alias to the subquery clause.
As an example, Spark will issue a query of the following form to the JDBC source.<br /><br />
<code> SELECT &lt;columns&gt; FROM (&lt;user_specified_query&gt;) spark_gen_alias</code><br /><br />
There are a couple of restrictions when using this option:<br />
<ol>
<li> It is not allowed to specify <code>dbtable</code> and <code>query</code> options at the same time. </li>
<li> It is not allowed to specify the <code>query</code> and <code>partitionColumn</code> options at the same time. When the
<code>partitionColumn</code> option is required, the subquery can instead be specified via the <code>dbtable</code> option, and
partition columns can be qualified using the subquery alias provided as part of <code>dbtable</code>. <br />
Example:<br />
<code>
spark.read.format("jdbc")<br />
.option("url", jdbcUrl)<br />
.option("query", "select c1, c2 from t1")<br />
.load()
</code></li>
</ol>
</td>
<td>read/write</td>
</tr>
<tr>
<td><code>driver</code></td>
<td>(none)</td>
<td>
The class name of the JDBC driver to use to connect to this URL.
</td>
<td>read/write</td>
</tr>
<tr>
<td><code>partitionColumn, lowerBound, upperBound</code></td>
<td>(none)</td>
<td>
These options must all be specified if any of them is specified. In addition,
<code>numPartitions</code> must be specified. They describe how to partition the table when
reading in parallel from multiple workers.
<code>partitionColumn</code> must be a numeric, date, or timestamp column from the table in question.
Notice that <code>lowerBound</code> and <code>upperBound</code> are just used to decide the
partition stride, not for filtering the rows in the table, so all rows in the table will be
partitioned and returned. This option applies only to reading. (A sketch of a partitioned read follows this table.)
</td>
<td>read</td>
</tr>
<tr>
<td><code>numPartitions</code></td>
<td>(none)</td>
<td>
The maximum number of partitions that can be used for parallelism in table reading and
writing. This also determines the maximum number of concurrent JDBC connections.
If the number of partitions to write exceeds this limit, we decrease it to this limit by
calling <code>coalesce(numPartitions)</code> before writing.
</td>
<td>read/write</td>
</tr>
<tr>
<td><code>queryTimeout</code></td>
<td><code>0</code></td>
<td>
The number of seconds the driver will wait for a Statement object to execute. Zero means there
is no limit. In the write path, this option depends on how JDBC drivers implement the API
<code>setQueryTimeout</code>; e.g., the H2 JDBC driver checks the timeout of each query
instead of an entire JDBC batch.
</td>
<td>read/write</td>
</tr>
<tr>
<td><code>fetchsize</code></td>
<td><code>0</code></td>
<td>
The JDBC fetch size, which determines how many rows to fetch per round trip. This can help performance on JDBC drivers which default to a low fetch size (e.g., Oracle with 10 rows).
</td>
<td>read</td>
</tr>
<tr>
<td><code>batchsize</code></td>
<td><code>1000</code></td>
<td>
The JDBC batch size, which determines how many rows to insert per round trip. This can help performance on JDBC drivers. This option applies only to writing.
</td>
<td>write</td>
</tr>
<tr>
<td><code>isolationLevel</code></td>
<td><code>READ_UNCOMMITTED</code></td>
<td>
The transaction isolation level, which applies to the current connection. It can be one of <code>NONE</code>, <code>READ_COMMITTED</code>, <code>READ_UNCOMMITTED</code>, <code>REPEATABLE_READ</code>, or <code>SERIALIZABLE</code>, corresponding to the standard transaction isolation levels defined by JDBC's Connection object, with a default of <code>READ_UNCOMMITTED</code>. Please refer to the documentation in <code>java.sql.Connection</code>.
</td>
<td>write</td>
</tr>
<tr>
<td><code>sessionInitStatement</code></td>
<td>(none)</td>
<td>
After each database session is opened to the remote DB and before starting to read data, this option executes a custom SQL statement (or a PL/SQL block). Use this to implement session initialization code. Example: <code>option("sessionInitStatement", """BEGIN execute immediate 'alter session set "_serial_direct_read"=true'; END;""")</code>
</td>
<td>read</td>
</tr>
<tr>
<td><code>truncate</code></td>
<td><code>false</code></td>
<td>
This is a JDBC writer related option. When <code>SaveMode.Overwrite</code> is enabled, this option causes Spark to truncate an existing table instead of dropping and recreating it. This can be more efficient, and it prevents the table metadata (e.g., indices) from being removed. However, it will not work in some cases, such as when the new data has a different schema. In case of failures, users should turn off the <code>truncate</code> option to use <code>DROP TABLE</code> again. Also, because the behavior of <code>TRUNCATE TABLE</code> differs among DBMSs, it's not always safe to use this. MySQLDialect, DB2Dialect, MsSqlServerDialect, DerbyDialect, and OracleDialect support this, while PostgresDialect and the default JDBCDialect don't. For unknown and unsupported JDBCDialects, the user option <code>truncate</code> is ignored. (A sketch of a truncating overwrite follows this table.)
</td>
<td>write</td>
</tr>
<tr>
<td><code>cascadeTruncate</code></td>
<td>the default cascading truncate behaviour of the JDBC database in question, specified in the <code>isCascadeTruncate</code> in each JDBCDialect</td>
<td>
This is a JDBC writer related option. If enabled and supported by the JDBC database (PostgreSQL and Oracle at the moment), this option allows execution of a <code>TRUNCATE TABLE t CASCADE</code> (in the case of PostgreSQL a <code>TRUNCATE TABLE ONLY t CASCADE</code> is executed to prevent inadvertently truncating descendant tables). This will affect other tables, and thus should be used with care.
</td>
<td>write</td>
</tr>
<tr>
<td><code>createTableOptions</code></td>
<td>(empty string)</td>
<td>
This is a JDBC writer related option. If specified, this option allows setting of database-specific table and partition options when creating a table (e.g., <code>CREATE TABLE t (name string) ENGINE=InnoDB</code>).
</td>
<td>write</td>
</tr>
<tr>
<td><code>createTableColumnTypes</code></td>
<td>(none)</td>
<td>
The database column data types to use instead of the defaults when creating the table. Data type information should be specified in the same format as <code>CREATE TABLE</code> columns syntax (e.g., <code>"name CHAR(64), comments VARCHAR(1024)"</code>). The specified types should be valid Spark SQL data types.
</td>
<td>write</td>
</tr>
<tr>
<td><code>customSchema</code></td>
<td>(none)</td>
<td>
The custom schema to use for reading data from JDBC connectors. For example, <code>"id DECIMAL(38, 0), name STRING"</code>. You can also specify partial fields, and the others use the default type mapping. For example, <code>"id DECIMAL(38, 0)"</code>. The column names should be identical to the corresponding column names of the JDBC table. Users can specify the corresponding data types of Spark SQL instead of using the defaults.
</td>
<td>read</td>
</tr>
<tr>
<td><code>pushDownPredicate</code></td>
<td><code>true</code></td>
<td>
The option to enable or disable predicate push-down into the JDBC data source. The default value is true, in which case Spark will push down filters to the JDBC data source as much as possible. Otherwise, if set to false, no filter will be pushed down to the JDBC data source and thus all filters will be handled by Spark. Predicate push-down is usually turned off when the predicate filtering is performed faster by Spark than by the JDBC data source.
</td>
<td>read</td>
</tr>
<tr>
<td><code>pushDownAggregate</code></td>
<td><code>false</code></td>
<td>
The option to enable or disable aggregate push-down in the V2 JDBC data source. The default value is false, in which case Spark will not push down aggregates to the JDBC data source. Otherwise, if set to true, aggregates will be pushed down to the JDBC data source. Aggregate push-down is usually turned off when the aggregate is performed faster by Spark than by the JDBC data source. Please note that aggregates can be pushed down if and only if all the aggregate functions and the related filters can be pushed down. Spark assumes that the data source can't fully complete the aggregate and does a final aggregate over the data source output.
</td>
<td>read</td>
</tr>
<tr>
<td><code>keytab</code></td>
<td>(none)</td>
<td>
Location of the Kerberos keytab file (which must be pre-uploaded to all nodes either by the <code>--files</code> option of spark-submit or manually) for the JDBC client. When path information is found, Spark considers the keytab to be distributed manually; otherwise, distribution via <code>--files</code> is assumed. If both <code>keytab</code> and <code>principal</code> are defined, then Spark tries to do Kerberos authentication.
</td>
<td>read/write</td>
</tr>
<tr>
<td><code>principal</code></td>
<td>(none)</td>
<td>
Specifies the Kerberos principal name for the JDBC client. If both <code>keytab</code> and <code>principal</code> are defined, then Spark tries to do Kerberos authentication.
</td>
<td>read/write</td>
</tr>
<tr>
<td><code>refreshKrb5Config</code></td>
<td><code>false</code></td>
<td>
This option controls whether the Kerberos configuration is refreshed for the JDBC client before
establishing a new connection. Set it to true if you want to refresh the configuration; otherwise, leave it
at the default of false. Note that if you set this option to true and try to establish multiple connections,
a race condition can occur. One possible situation is as follows:
<ol>
<li>The refreshKrb5Config flag is set with security context 1</li>
<li>A JDBC connection provider is used for the corresponding DBMS</li>
<li>The krb5.conf is modified, but the JVM has not yet realized that it must be reloaded</li>
<li>Spark authenticates successfully for security context 1</li>
<li>The JVM loads security context 2 from the modified krb5.conf</li>
<li>Spark restores the previously saved security context 1</li>
<li>The modified krb5.conf content is lost</li>
</ol>
</td>
<td>read/write</td>
</tr>
</table>
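<p>For illustration, here is a minimal sketch of a partitioned read (the bounds and partition count are
placeholder values; recall that <code>lowerBound</code> and <code>upperBound</code> only shape the partition
stride and do not filter rows):</p>
<figure class="highlight"><pre><code class="language-scala" data-lang="scala">// Read in parallel with 10 partitions over the numeric column "id".
// partitionColumn, lowerBound, upperBound and numPartitions must be set together.
val partitionedDF = spark.read
  .format("jdbc")
  .option("url", "jdbc:postgresql://dbserver/test")
  .option("dbtable", "schema.tablename")
  .option("partitionColumn", "id")
  .option("lowerBound", "0")
  .option("upperBound", "1000000")
  .option("numPartitions", "10")
  .option("user", "username")
  .option("password", "password")
  .load()</code></pre></figure>
<p>And a sketch of a truncating overwrite, assuming a dialect that supports the <code>truncate</code> option
(MySQL in this hypothetical example):</p>
<figure class="highlight"><pre><code class="language-scala" data-lang="scala">import org.apache.spark.sql.SaveMode

// Overwrite by truncating the existing table instead of dropping and recreating it.
partitionedDF.write
  .format("jdbc")
  .option("url", "jdbc:mysql://dbserver/test")
  .option("dbtable", "schema.tablename")
  .option("truncate", "true")
  .option("user", "username")
  .option("password", "password")
  .mode(SaveMode.Overwrite)
  .save()</code></pre></figure>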
<p>Note that Kerberos authentication with a keytab is not always supported by the JDBC driver.<br />
Before using the <code>keytab</code> and <code>principal</code> configuration options, please make sure the following requirements are met:</p>
<ul>
<li>The included JDBC driver version supports Kerberos authentication with a keytab.</li>
<li>There is a built-in connection provider which supports the used database.</li>
</ul>
<p>There are built-in connection providers for the following databases:</p>
<ul>
<li>DB2</li>
<li>MariaDB</li>
<li>MS SQL Server</li>
<li>Oracle</li>
<li>PostgreSQL</li>
</ul>
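<p>Assuming both requirements above are met, a minimal sketch of a Kerberos-authenticated read looks as
follows (the keytab file name and principal are placeholders):</p>
<figure class="highlight"><pre><code class="language-scala" data-lang="scala">// The keytab must already exist on every node, e.g. shipped via
// spark-submit --files /path/to/dbuser.keytab (then referenced by file name),
// or pre-uploaded manually (then referenced by its full path).
val kerberosDF = spark.read
  .format("jdbc")
  .option("url", "jdbc:postgresql://dbserver/test")
  .option("dbtable", "schema.tablename")
  .option("keytab", "dbuser.keytab")
  .option("principal", "dbuser@EXAMPLE.COM")
  .load()</code></pre></figure>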
<p>If the requirements are not met, please consider using the <code>JdbcConnectionProvider</code> developer API to handle custom authentication.</p>
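<p>As a rough outline, such a provider implements the <code>JdbcConnectionProvider</code> abstract class and is
registered through Java's <code>ServiceLoader</code> mechanism, i.e. its class name is listed in a
<code>META-INF/services/org.apache.spark.sql.jdbc.JdbcConnectionProvider</code> resource file. The sketch below is
hypothetical (the class name, URL prefix, and authentication logic are placeholders):</p>
<figure class="highlight"><pre><code class="language-scala" data-lang="scala">import java.sql.{Connection, Driver}

import org.apache.spark.sql.jdbc.JdbcConnectionProvider

// Hypothetical provider for a database that needs custom authentication.
class MyConnectionProvider extends JdbcConnectionProvider {
  override val name: String = "my-provider"

  // Claim connections whose URL targets our database.
  override def canHandle(driver: Driver, options: Map[String, String]): Boolean =
    options.getOrElse("url", "").startsWith("jdbc:mydb:")

  override def getConnection(driver: Driver, options: Map[String, String]): Connection = {
    val props = new java.util.Properties()
    // ... perform the custom authentication here, then open the connection
    driver.connect(options("url"), props)
  }

  // Return true if getConnection mutates JVM-global security state (e.g. a JAAS login),
  // so that Spark serializes access to it.
  override def modifiesSecurityContext(driver: Driver, options: Map[String, String]): Boolean =
    false
}</code></pre></figure>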
<div class="codetabs">
<div data-lang="scala">
<div class="highlight"><pre class="codehilite"><code><span class="c1">// Note: JDBC loading and saving can be achieved via either the load/save or jdbc methods</span>
<span class="c1">// Loading data from a JDBC source</span>
<span class="k">val</span> <span class="nv">jdbcDF</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">read</span>
<span class="o">.</span><span class="py">format</span><span class="o">(</span><span class="s">"jdbc"</span><span class="o">)</span>
<span class="o">.</span><span class="py">option</span><span class="o">(</span><span class="s">"url"</span><span class="o">,</span> <span class="s">"jdbc:postgresql:dbserver"</span><span class="o">)</span>
<span class="o">.</span><span class="py">option</span><span class="o">(</span><span class="s">"dbtable"</span><span class="o">,</span> <span class="s">"schema.tablename"</span><span class="o">)</span>
<span class="o">.</span><span class="py">option</span><span class="o">(</span><span class="s">"user"</span><span class="o">,</span> <span class="s">"username"</span><span class="o">)</span>
<span class="o">.</span><span class="py">option</span><span class="o">(</span><span class="s">"password"</span><span class="o">,</span> <span class="s">"password"</span><span class="o">)</span>
<span class="o">.</span><span class="py">load</span><span class="o">()</span>
<span class="k">val</span> <span class="nv">connectionProperties</span> <span class="k">=</span> <span class="k">new</span> <span class="nc">Properties</span><span class="o">()</span>
<span class="nv">connectionProperties</span><span class="o">.</span><span class="py">put</span><span class="o">(</span><span class="s">"user"</span><span class="o">,</span> <span class="s">"username"</span><span class="o">)</span>
<span class="nv">connectionProperties</span><span class="o">.</span><span class="py">put</span><span class="o">(</span><span class="s">"password"</span><span class="o">,</span> <span class="s">"password"</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">jdbcDF2</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">read</span>
<span class="o">.</span><span class="py">jdbc</span><span class="o">(</span><span class="s">"jdbc:postgresql:dbserver"</span><span class="o">,</span> <span class="s">"schema.tablename"</span><span class="o">,</span> <span class="n">connectionProperties</span><span class="o">)</span>
<span class="c1">// Specifying the custom data types of the read schema</span>
<span class="nv">connectionProperties</span><span class="o">.</span><span class="py">put</span><span class="o">(</span><span class="s">"customSchema"</span><span class="o">,</span> <span class="s">"id DECIMAL(38, 0), name STRING"</span><span class="o">)</span>
<span class="k">val</span> <span class="nv">jdbcDF3</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">read</span>
<span class="o">.</span><span class="py">jdbc</span><span class="o">(</span><span class="s">"jdbc:postgresql:dbserver"</span><span class="o">,</span> <span class="s">"schema.tablename"</span><span class="o">,</span> <span class="n">connectionProperties</span><span class="o">)</span>
<span class="c1">// Saving data to a JDBC source</span>
<span class="nv">jdbcDF</span><span class="o">.</span><span class="py">write</span>
<span class="o">.</span><span class="py">format</span><span class="o">(</span><span class="s">"jdbc"</span><span class="o">)</span>
<span class="o">.</span><span class="py">option</span><span class="o">(</span><span class="s">"url"</span><span class="o">,</span> <span class="s">"jdbc:postgresql:dbserver"</span><span class="o">)</span>
<span class="o">.</span><span class="py">option</span><span class="o">(</span><span class="s">"dbtable"</span><span class="o">,</span> <span class="s">"schema.tablename"</span><span class="o">)</span>
<span class="o">.</span><span class="py">option</span><span class="o">(</span><span class="s">"user"</span><span class="o">,</span> <span class="s">"username"</span><span class="o">)</span>
<span class="o">.</span><span class="py">option</span><span class="o">(</span><span class="s">"password"</span><span class="o">,</span> <span class="s">"password"</span><span class="o">)</span>
<span class="o">.</span><span class="py">save</span><span class="o">()</span>
<span class="nv">jdbcDF2</span><span class="o">.</span><span class="py">write</span>
<span class="o">.</span><span class="py">jdbc</span><span class="o">(</span><span class="s">"jdbc:postgresql:dbserver"</span><span class="o">,</span> <span class="s">"schema.tablename"</span><span class="o">,</span> <span class="n">connectionProperties</span><span class="o">)</span>
<span class="c1">// Specifying create table column data types on write</span>
<span class="nv">jdbcDF</span><span class="o">.</span><span class="py">write</span>
<span class="o">.</span><span class="py">option</span><span class="o">(</span><span class="s">"createTableColumnTypes"</span><span class="o">,</span> <span class="s">"name CHAR(64), comments VARCHAR(1024)"</span><span class="o">)</span>
<span class="o">.</span><span class="py">jdbc</span><span class="o">(</span><span class="s">"jdbc:postgresql:dbserver"</span><span class="o">,</span> <span class="s">"schema.tablename"</span><span class="o">,</span> <span class="n">connectionProperties</span><span class="o">)</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala" in the Spark repo.</small></div>
</div>
<div data-lang="java">
<div class="highlight"><pre class="codehilite"><code><span class="c1">// Note: JDBC loading and saving can be achieved via either the load/save or jdbc methods</span>
<span class="c1">// Loading data from a JDBC source</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">jdbcDF</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="na">read</span><span class="o">()</span>
<span class="o">.</span><span class="na">format</span><span class="o">(</span><span class="s">"jdbc"</span><span class="o">)</span>
<span class="o">.</span><span class="na">option</span><span class="o">(</span><span class="s">"url"</span><span class="o">,</span> <span class="s">"jdbc:postgresql:dbserver"</span><span class="o">)</span>
<span class="o">.</span><span class="na">option</span><span class="o">(</span><span class="s">"dbtable"</span><span class="o">,</span> <span class="s">"schema.tablename"</span><span class="o">)</span>
<span class="o">.</span><span class="na">option</span><span class="o">(</span><span class="s">"user"</span><span class="o">,</span> <span class="s">"username"</span><span class="o">)</span>
<span class="o">.</span><span class="na">option</span><span class="o">(</span><span class="s">"password"</span><span class="o">,</span> <span class="s">"password"</span><span class="o">)</span>
<span class="o">.</span><span class="na">load</span><span class="o">();</span>
<span class="nc">Properties</span> <span class="n">connectionProperties</span> <span class="o">=</span> <span class="k">new</span> <span class="nc">Properties</span><span class="o">();</span>
<span class="n">connectionProperties</span><span class="o">.</span><span class="na">put</span><span class="o">(</span><span class="s">"user"</span><span class="o">,</span> <span class="s">"username"</span><span class="o">);</span>
<span class="n">connectionProperties</span><span class="o">.</span><span class="na">put</span><span class="o">(</span><span class="s">"password"</span><span class="o">,</span> <span class="s">"password"</span><span class="o">);</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">jdbcDF2</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="na">read</span><span class="o">()</span>
<span class="o">.</span><span class="na">jdbc</span><span class="o">(</span><span class="s">"jdbc:postgresql:dbserver"</span><span class="o">,</span> <span class="s">"schema.tablename"</span><span class="o">,</span> <span class="n">connectionProperties</span><span class="o">);</span>
<span class="c1">// Saving data to a JDBC source</span>
<span class="n">jdbcDF</span><span class="o">.</span><span class="na">write</span><span class="o">()</span>
<span class="o">.</span><span class="na">format</span><span class="o">(</span><span class="s">"jdbc"</span><span class="o">)</span>
<span class="o">.</span><span class="na">option</span><span class="o">(</span><span class="s">"url"</span><span class="o">,</span> <span class="s">"jdbc:postgresql:dbserver"</span><span class="o">)</span>
<span class="o">.</span><span class="na">option</span><span class="o">(</span><span class="s">"dbtable"</span><span class="o">,</span> <span class="s">"schema.tablename"</span><span class="o">)</span>
<span class="o">.</span><span class="na">option</span><span class="o">(</span><span class="s">"user"</span><span class="o">,</span> <span class="s">"username"</span><span class="o">)</span>
<span class="o">.</span><span class="na">option</span><span class="o">(</span><span class="s">"password"</span><span class="o">,</span> <span class="s">"password"</span><span class="o">)</span>
<span class="o">.</span><span class="na">save</span><span class="o">();</span>
<span class="n">jdbcDF2</span><span class="o">.</span><span class="na">write</span><span class="o">()</span>
<span class="o">.</span><span class="na">jdbc</span><span class="o">(</span><span class="s">"jdbc:postgresql:dbserver"</span><span class="o">,</span> <span class="s">"schema.tablename"</span><span class="o">,</span> <span class="n">connectionProperties</span><span class="o">);</span>
<span class="c1">// Specifying create table column data types on write</span>
<span class="n">jdbcDF</span><span class="o">.</span><span class="na">write</span><span class="o">()</span>
<span class="o">.</span><span class="na">option</span><span class="o">(</span><span class="s">"createTableColumnTypes"</span><span class="o">,</span> <span class="s">"name CHAR(64), comments VARCHAR(1024)"</span><span class="o">)</span>
<span class="o">.</span><span class="na">jdbc</span><span class="o">(</span><span class="s">"jdbc:postgresql:dbserver"</span><span class="o">,</span> <span class="s">"schema.tablename"</span><span class="o">,</span> <span class="n">connectionProperties</span><span class="o">);</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java" in the Spark repo.</small></div>
</div>
<div data-lang="python">
<div class="highlight"><pre class="codehilite"><code><span class="c1"># Note: JDBC loading and saving can be achieved via either the load/save or jdbc methods
# Loading data from a JDBC source
</span><span class="n">jdbcDF</span> <span class="o">=</span> <span class="n">spark</span><span class="p">.</span><span class="n">read</span> \
<span class="p">.</span><span class="nb">format</span><span class="p">(</span><span class="s">"jdbc"</span><span class="p">)</span> \
<span class="p">.</span><span class="n">option</span><span class="p">(</span><span class="s">"url"</span><span class="p">,</span> <span class="s">"jdbc:postgresql:dbserver"</span><span class="p">)</span> \
<span class="p">.</span><span class="n">option</span><span class="p">(</span><span class="s">"dbtable"</span><span class="p">,</span> <span class="s">"schema.tablename"</span><span class="p">)</span> \
<span class="p">.</span><span class="n">option</span><span class="p">(</span><span class="s">"user"</span><span class="p">,</span> <span class="s">"username"</span><span class="p">)</span> \
<span class="p">.</span><span class="n">option</span><span class="p">(</span><span class="s">"password"</span><span class="p">,</span> <span class="s">"password"</span><span class="p">)</span> \
<span class="p">.</span><span class="n">load</span><span class="p">()</span>
<span class="n">jdbcDF2</span> <span class="o">=</span> <span class="n">spark</span><span class="p">.</span><span class="n">read</span> \
<span class="p">.</span><span class="n">jdbc</span><span class="p">(</span><span class="s">"jdbc:postgresql:dbserver"</span><span class="p">,</span> <span class="s">"schema.tablename"</span><span class="p">,</span>
<span class="n">properties</span><span class="o">=</span><span class="p">{</span><span class="s">"user"</span><span class="p">:</span> <span class="s">"username"</span><span class="p">,</span> <span class="s">"password"</span><span class="p">:</span> <span class="s">"password"</span><span class="p">})</span>
<span class="c1"># Specifying dataframe column data types on read
</span><span class="n">jdbcDF3</span> <span class="o">=</span> <span class="n">spark</span><span class="p">.</span><span class="n">read</span> \
<span class="p">.</span><span class="nb">format</span><span class="p">(</span><span class="s">"jdbc"</span><span class="p">)</span> \
<span class="p">.</span><span class="n">option</span><span class="p">(</span><span class="s">"url"</span><span class="p">,</span> <span class="s">"jdbc:postgresql:dbserver"</span><span class="p">)</span> \
<span class="p">.</span><span class="n">option</span><span class="p">(</span><span class="s">"dbtable"</span><span class="p">,</span> <span class="s">"schema.tablename"</span><span class="p">)</span> \
<span class="p">.</span><span class="n">option</span><span class="p">(</span><span class="s">"user"</span><span class="p">,</span> <span class="s">"username"</span><span class="p">)</span> \
<span class="p">.</span><span class="n">option</span><span class="p">(</span><span class="s">"password"</span><span class="p">,</span> <span class="s">"password"</span><span class="p">)</span> \
<span class="p">.</span><span class="n">option</span><span class="p">(</span><span class="s">"customSchema"</span><span class="p">,</span> <span class="s">"id DECIMAL(38, 0), name STRING"</span><span class="p">)</span> \
<span class="p">.</span><span class="n">load</span><span class="p">()</span>
<span class="c1"># Saving data to a JDBC source
</span><span class="n">jdbcDF</span><span class="p">.</span><span class="n">write</span> \
<span class="p">.</span><span class="nb">format</span><span class="p">(</span><span class="s">"jdbc"</span><span class="p">)</span> \
<span class="p">.</span><span class="n">option</span><span class="p">(</span><span class="s">"url"</span><span class="p">,</span> <span class="s">"jdbc:postgresql:dbserver"</span><span class="p">)</span> \
<span class="p">.</span><span class="n">option</span><span class="p">(</span><span class="s">"dbtable"</span><span class="p">,</span> <span class="s">"schema.tablename"</span><span class="p">)</span> \
<span class="p">.</span><span class="n">option</span><span class="p">(</span><span class="s">"user"</span><span class="p">,</span> <span class="s">"username"</span><span class="p">)</span> \
<span class="p">.</span><span class="n">option</span><span class="p">(</span><span class="s">"password"</span><span class="p">,</span> <span class="s">"password"</span><span class="p">)</span> \
<span class="p">.</span><span class="n">save</span><span class="p">()</span>
<span class="n">jdbcDF2</span><span class="p">.</span><span class="n">write</span> \
<span class="p">.</span><span class="n">jdbc</span><span class="p">(</span><span class="s">"jdbc:postgresql:dbserver"</span><span class="p">,</span> <span class="s">"schema.tablename"</span><span class="p">,</span>
<span class="n">properties</span><span class="o">=</span><span class="p">{</span><span class="s">"user"</span><span class="p">:</span> <span class="s">"username"</span><span class="p">,</span> <span class="s">"password"</span><span class="p">:</span> <span class="s">"password"</span><span class="p">})</span>
<span class="c1"># Specifying create table column data types on write
</span><span class="n">jdbcDF</span><span class="p">.</span><span class="n">write</span> \
<span class="p">.</span><span class="n">option</span><span class="p">(</span><span class="s">"createTableColumnTypes"</span><span class="p">,</span> <span class="s">"name CHAR(64), comments VARCHAR(1024)"</span><span class="p">)</span> \
<span class="p">.</span><span class="n">jdbc</span><span class="p">(</span><span class="s">"jdbc:postgresql:dbserver"</span><span class="p">,</span> <span class="s">"schema.tablename"</span><span class="p">,</span>
<span class="n">properties</span><span class="o">=</span><span class="p">{</span><span class="s">"user"</span><span class="p">:</span> <span class="s">"username"</span><span class="p">,</span> <span class="s">"password"</span><span class="p">:</span> <span class="s">"password"</span><span class="p">})</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/python/sql/datasource.py" in the Spark repo.</small></div>
</div>
<div data-lang="r">
<div class="highlight"><pre class="codehilite"><code><span class="c1"># Loading data from a JDBC source</span><span class="w">
</span><span class="n">df</span><span class="w"> </span><span class="o">&lt;-</span><span class="w"> </span><span class="n">read.jdbc</span><span class="p">(</span><span class="s2">"jdbc:postgresql:dbserver"</span><span class="p">,</span><span class="w"> </span><span class="s2">"schema.tablename"</span><span class="p">,</span><span class="w"> </span><span class="n">user</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="s2">"username"</span><span class="p">,</span><span class="w"> </span><span class="n">password</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="s2">"password"</span><span class="p">)</span><span class="w">
</span><span class="c1"># Saving data to a JDBC source</span><span class="w">
</span><span class="n">write.jdbc</span><span class="p">(</span><span class="n">df</span><span class="p">,</span><span class="w"> </span><span class="s2">"jdbc:postgresql:dbserver"</span><span class="p">,</span><span class="w"> </span><span class="s2">"schema.tablename"</span><span class="p">,</span><span class="w"> </span><span class="n">user</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="s2">"username"</span><span class="p">,</span><span class="w"> </span><span class="n">password</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="s2">"password"</span><span class="p">)</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/r/RSparkSQLExample.R" in the Spark repo.</small></div>
</div>
<div data-lang="SQL">
<figure class="highlight"><pre><code class="language-sql" data-lang="sql"><span class="k">CREATE</span> <span class="k">TEMPORARY</span> <span class="k">VIEW</span> <span class="n">jdbcTable</span>
<span class="k">USING</span> <span class="n">org</span><span class="p">.</span><span class="n">apache</span><span class="p">.</span><span class="n">spark</span><span class="p">.</span><span class="k">sql</span><span class="p">.</span><span class="n">jdbc</span>
<span class="k">OPTIONS</span> <span class="p">(</span>
<span class="n">url</span> <span class="nv">"jdbc:postgresql:dbserver"</span><span class="p">,</span>
<span class="n">dbtable</span> <span class="nv">"schema.tablename"</span><span class="p">,</span>
<span class="k">user</span> <span class="s1">'username'</span><span class="p">,</span>
<span class="n">password</span> <span class="s1">'password'</span>
<span class="p">)</span>
<span class="k">INSERT</span> <span class="k">INTO</span> <span class="k">TABLE</span> <span class="n">jdbcTable</span>
<span class="k">SELECT</span> <span class="o">*</span> <span class="k">FROM</span> <span class="n">resultTable</span></code></pre></figure>
</div>
</div>
</div>
<!-- /container -->
</div>
<script src="js/vendor/jquery-3.5.1.min.js"></script>
<script src="js/vendor/bootstrap.bundle.min.js"></script>
<script src="js/vendor/anchor.min.js"></script>
<script src="js/main.js"></script>
<script type="text/javascript" src="https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.js"></script>
<script type="text/javascript">
// DocSearch is entirely free and automated. DocSearch is built in two parts:
// 1. a crawler which we run on our own infrastructure every 24 hours. It follows every link
// in your website and extract content from every page it traverses. It then pushes this
// content to an Algolia index.
// 2. a JavaScript snippet to be inserted in your website that will bind this Algolia index
// to your search input and display its results in a dropdown UI. For more details on
// how DocSearch works, check the DocSearch docs.
docsearch({
apiKey: 'd62f962a82bc9abb53471cb7b89da35e',
appId: 'RAI69RXRSK',
indexName: 'apache_spark',
inputSelector: '#docsearch-input',
enhancedSearchInput: true,
algoliaOptions: {
'facetFilters': ["version:3.2.4"]
},
debug: false // Set debug to true if you want to inspect the dropdown
});
</script>
<!-- MathJax Section -->
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
TeX: { equationNumbers: { autoNumber: "AMS" } }
});
</script>
<script>
// Note that we load MathJax this way to work with local file (file://), HTTP and HTTPS.
// We could use "//cdn.mathjax...", but that won't support "file://".
(function(d, script) {
script = d.createElement('script');
script.type = 'text/javascript';
script.async = true;
script.onload = function(){
MathJax.Hub.Config({
tex2jax: {
inlineMath: [ ["$", "$"], ["\\\\(","\\\\)"] ],
displayMath: [ ["$$","$$"], ["\\[", "\\]"] ],
processEscapes: true,
skipTags: ['script', 'noscript', 'style', 'textarea', 'pre']
}
});
};
script.src = ('https:' == document.location.protocol ? 'https://' : 'http://') +
'cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js' +
'?config=TeX-AMS-MML_HTMLorMML';
d.getElementsByTagName('head')[0].appendChild(script);
}(document));
</script>
</body>
</html>