<!DOCTYPE html>
<!--[if lt IE 7]> <html class="no-js lt-ie9 lt-ie8 lt-ie7"> <![endif]-->
<!--[if IE 7]> <html class="no-js lt-ie9 lt-ie8"> <![endif]-->
<!--[if IE 8]> <html class="no-js lt-ie9"> <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js"> <!--<![endif]-->
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title>CSV Files - Spark 3.3.4 Documentation</title>
<link rel="stylesheet" href="css/bootstrap.min.css">
<style>
body {
padding-top: 60px;
padding-bottom: 40px;
}
</style>
<meta name="viewport" content="width=device-width">
<link rel="stylesheet" href="css/main.css">
<script src="js/vendor/modernizr-2.6.1-respond-1.1.0.min.js"></script>
<link rel="stylesheet" href="css/pygments-default.css">
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.css" />
<link rel="stylesheet" href="css/docsearch.css">
<!-- Matomo -->
<script>
var _paq = window._paq = window._paq || [];
/* tracker methods like "setCustomDimension" should be called before "trackPageView" */
_paq.push(["disableCookies"]);
_paq.push(['trackPageView']);
_paq.push(['enableLinkTracking']);
(function() {
var u="https://analytics.apache.org/";
_paq.push(['setTrackerUrl', u+'matomo.php']);
_paq.push(['setSiteId', '40']);
var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
g.async=true; g.src=u+'matomo.js'; s.parentNode.insertBefore(g,s);
})();
</script>
<!-- End Matomo Code -->
</head>
<body>
<!--[if lt IE 7]>
<p class="chromeframe">You are using an outdated browser. <a href="https://browsehappy.com/">Upgrade your browser today</a> or <a href="http://www.google.com/chromeframe/?redirect=true">install Google Chrome Frame</a> to better experience this site.</p>
<![endif]-->
<!-- This code is taken from http://twitter.github.com/bootstrap/examples/hero.html -->
<nav class="navbar fixed-top navbar-expand-md navbar-light bg-light" id="topbar">
<div class="container">
<div class="navbar-header">
<div class="navbar-brand"><a href="index.html">
<img src="img/spark-logo-hd.png" style="height:50px;"/></a><span class="version">3.3.4</span>
</div>
</div>
<button class="navbar-toggler" type="button" data-toggle="collapse"
data-target="#navbarCollapse" aria-controls="navbarCollapse"
aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarCollapse">
<ul class="navbar-nav">
<!--TODO(andyk): Add class="active" attribute to li somehow.-->
<li class="nav-item"><a href="index.html" class="nav-link">Overview</a></li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarQuickStart" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Programming Guides</a>
<div class="dropdown-menu" aria-labelledby="navbarQuickStart">
<a class="dropdown-item" href="quick-start.html">Quick Start</a>
<a class="dropdown-item" href="rdd-programming-guide.html">RDDs, Accumulators, Broadcasts Vars</a>
<a class="dropdown-item" href="sql-programming-guide.html">SQL, DataFrames, and Datasets</a>
<a class="dropdown-item" href="structured-streaming-programming-guide.html">Structured Streaming</a>
<a class="dropdown-item" href="streaming-programming-guide.html">Spark Streaming (DStreams)</a>
<a class="dropdown-item" href="ml-guide.html">MLlib (Machine Learning)</a>
<a class="dropdown-item" href="graphx-programming-guide.html">GraphX (Graph Processing)</a>
<a class="dropdown-item" href="sparkr.html">SparkR (R on Spark)</a>
<a class="dropdown-item" href="api/python/getting_started/index.html">PySpark (Python on Spark)</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarAPIDocs" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">API Docs</a>
<div class="dropdown-menu" aria-labelledby="navbarAPIDocs">
<a class="dropdown-item" href="api/scala/org/apache/spark/index.html">Scala</a>
<a class="dropdown-item" href="api/java/index.html">Java</a>
<a class="dropdown-item" href="api/python/index.html">Python</a>
<a class="dropdown-item" href="api/R/index.html">R</a>
<a class="dropdown-item" href="api/sql/index.html">SQL, Built-in Functions</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarDeploying" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Deploying</a>
<div class="dropdown-menu" aria-labelledby="navbarDeploying">
<a class="dropdown-item" href="cluster-overview.html">Overview</a>
<a class="dropdown-item" href="submitting-applications.html">Submitting Applications</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="spark-standalone.html">Spark Standalone</a>
<a class="dropdown-item" href="running-on-mesos.html">Mesos</a>
<a class="dropdown-item" href="running-on-yarn.html">YARN</a>
<a class="dropdown-item" href="running-on-kubernetes.html">Kubernetes</a>
</div>
</li>
<li class="nav-item dropdown">
<a href="#" class="nav-link dropdown-toggle" id="navbarMore" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">More</a>
<div class="dropdown-menu" aria-labelledby="navbarMore">
<a class="dropdown-item" href="configuration.html">Configuration</a>
<a class="dropdown-item" href="monitoring.html">Monitoring</a>
<a class="dropdown-item" href="tuning.html">Tuning Guide</a>
<a class="dropdown-item" href="job-scheduling.html">Job Scheduling</a>
<a class="dropdown-item" href="security.html">Security</a>
<a class="dropdown-item" href="hardware-provisioning.html">Hardware Provisioning</a>
<a class="dropdown-item" href="migration-guide.html">Migration Guide</a>
<div class="dropdown-divider"></div>
<a class="dropdown-item" href="building-spark.html">Building Spark</a>
<a class="dropdown-item" href="https://spark.apache.org/contributing.html">Contributing to Spark</a>
<a class="dropdown-item" href="https://spark.apache.org/third-party-projects.html">Third Party Projects</a>
</div>
</li>
<li class="nav-item">
<input type="text" id="docsearch-input" placeholder="Search the docs…">
</li>
</ul>
<!--<span class="navbar-text navbar-right"><span class="version-text">v3.3.4</span></span>-->
</div>
</div>
</nav>
<div class="container-wrapper">
<div class="left-menu-wrapper">
<div class="left-menu">
<h3><a href="sql-programming-guide.html">Spark SQL Guide</a></h3>
<ul>
<li>
<a href="sql-getting-started.html">
Getting Started
</a>
</li>
<li>
<a href="sql-data-sources.html">
Data Sources
</a>
</li>
<ul>
<li>
<a href="sql-data-sources-load-save-functions.html">
Generic Load/Save Functions
</a>
</li>
<li>
<a href="sql-data-sources-generic-options.html">
Generic File Source Options
</a>
</li>
<li>
<a href="sql-data-sources-parquet.html">
Parquet Files
</a>
</li>
<li>
<a href="sql-data-sources-orc.html">
ORC Files
</a>
</li>
<li>
<a href="sql-data-sources-json.html">
JSON Files
</a>
</li>
<li>
<a href="sql-data-sources-csv.html">
CSV Files
</a>
</li>
<li>
<a href="sql-data-sources-text.html">
Text Files
</a>
</li>
<li>
<a href="sql-data-sources-hive-tables.html">
Hive Tables
</a>
</li>
<li>
<a href="sql-data-sources-jdbc.html">
JDBC To Other Databases
</a>
</li>
<li>
<a href="sql-data-sources-avro.html">
Avro Files
</a>
</li>
<li>
<a href="sql-data-sources-binaryFile.html">
Whole Binary Files
</a>
</li>
<li>
<a href="sql-data-sources-troubleshooting.html">
Troubleshooting
</a>
</li>
</ul>
<li>
<a href="sql-performance-tuning.html">
Performance Tuning
</a>
</li>
<li>
<a href="sql-distributed-sql-engine.html">
Distributed SQL Engine
</a>
</li>
<li>
<a href="sql-pyspark-pandas-with-arrow.html">
PySpark Usage Guide for Pandas with Apache Arrow
</a>
</li>
<li>
<a href="sql-migration-old.html">
Migration Guide
</a>
</li>
<li>
<a href="sql-ref.html">
SQL Reference
</a>
</li>
</ul>
</div>
</div>
<input id="nav-trigger" class="nav-trigger" checked type="checkbox">
<label for="nav-trigger"></label>
<div class="content-with-sidebar mr-3" id="content">
<h1 class="title">CSV Files</h1>
<p>Spark SQL provides <code class="language-plaintext highlighter-rouge">spark.read().csv("file_name")</code> to read a file or directory of files in CSV format into a Spark DataFrame, and <code class="language-plaintext highlighter-rouge">dataframe.write().csv("path")</code> to write to a CSV file. The <code class="language-plaintext highlighter-rouge">option()</code> method can be used to customize the read or write behavior, such as controlling the header, the delimiter character, the character set, and so on.</p>
<div class="codetabs">
<div data-lang="scala">
<div class="highlight"><pre class="codehilite"><code><span class="c1">// A CSV dataset is pointed to by path.</span>
<span class="c1">// The path can be either a single CSV file or a directory of CSV files</span>
<span class="k">val</span> <span class="nv">path</span> <span class="k">=</span> <span class="s">"examples/src/main/resources/people.csv"</span>
<span class="k">val</span> <span class="nv">df</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">read</span><span class="o">.</span><span class="py">csv</span><span class="o">(</span><span class="n">path</span><span class="o">)</span>
<span class="nv">df</span><span class="o">.</span><span class="py">show</span><span class="o">()</span>
<span class="c1">// +------------------+</span>
<span class="c1">// | _c0|</span>
<span class="c1">// +------------------+</span>
<span class="c1">// | name;age;job|</span>
<span class="c1">// |Jorge;30;Developer|</span>
<span class="c1">// | Bob;32;Developer|</span>
<span class="c1">// +------------------+</span>
<span class="c1">// Read a csv with delimiter, the default delimiter is ","</span>
<span class="k">val</span> <span class="nv">df2</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">read</span><span class="o">.</span><span class="py">option</span><span class="o">(</span><span class="s">"delimiter"</span><span class="o">,</span> <span class="s">";"</span><span class="o">).</span><span class="py">csv</span><span class="o">(</span><span class="n">path</span><span class="o">)</span>
<span class="nv">df2</span><span class="o">.</span><span class="py">show</span><span class="o">()</span>
<span class="c1">// +-----+---+---------+</span>
<span class="c1">// | _c0|_c1| _c2|</span>
<span class="c1">// +-----+---+---------+</span>
<span class="c1">// | name|age| job|</span>
<span class="c1">// |Jorge| 30|Developer|</span>
<span class="c1">// | Bob| 32|Developer|</span>
<span class="c1">// +-----+---+---------+</span>
<span class="c1">// Read a csv with delimiter and a header</span>
<span class="k">val</span> <span class="nv">df3</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">read</span><span class="o">.</span><span class="py">option</span><span class="o">(</span><span class="s">"delimiter"</span><span class="o">,</span> <span class="s">";"</span><span class="o">).</span><span class="py">option</span><span class="o">(</span><span class="s">"header"</span><span class="o">,</span> <span class="s">"true"</span><span class="o">).</span><span class="py">csv</span><span class="o">(</span><span class="n">path</span><span class="o">)</span>
<span class="nv">df3</span><span class="o">.</span><span class="py">show</span><span class="o">()</span>
<span class="c1">// +-----+---+---------+</span>
<span class="c1">// | name|age| job|</span>
<span class="c1">// +-----+---+---------+</span>
<span class="c1">// |Jorge| 30|Developer|</span>
<span class="c1">// | Bob| 32|Developer|</span>
<span class="c1">// +-----+---+---------+</span>
<span class="c1">// You can also use options() to use multiple options</span>
<span class="k">val</span> <span class="nv">df4</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">read</span><span class="o">.</span><span class="py">options</span><span class="o">(</span><span class="nc">Map</span><span class="o">(</span><span class="s">"delimiter"</span><span class="o">-&gt;</span><span class="s">";"</span><span class="o">,</span> <span class="s">"header"</span><span class="o">-&gt;</span><span class="s">"true"</span><span class="o">)).</span><span class="py">csv</span><span class="o">(</span><span class="n">path</span><span class="o">)</span>
<span class="c1">// "output" is a folder which contains multiple csv files and a _SUCCESS file.</span>
<span class="nv">df3</span><span class="o">.</span><span class="py">write</span><span class="o">.</span><span class="py">csv</span><span class="o">(</span><span class="s">"output"</span><span class="o">)</span>
<span class="c1">// Read all files in a folder, please make sure only CSV files should present in the folder.</span>
<span class="k">val</span> <span class="nv">folderPath</span> <span class="k">=</span> <span class="s">"examples/src/main/resources"</span><span class="o">;</span>
<span class="k">val</span> <span class="nv">df5</span> <span class="k">=</span> <span class="nv">spark</span><span class="o">.</span><span class="py">read</span><span class="o">.</span><span class="py">csv</span><span class="o">(</span><span class="n">folderPath</span><span class="o">);</span>
<span class="nv">df5</span><span class="o">.</span><span class="py">show</span><span class="o">();</span>
<span class="c1">// Wrong schema because non-CSV files are read</span>
<span class="c1">// +-----------+</span>
<span class="c1">// | _c0|</span>
<span class="c1">// +-----------+</span>
<span class="c1">// |238val_238|</span>
<span class="c1">// | 86val_86|</span>
<span class="c1">// |311val_311|</span>
<span class="c1">// | 27val_27|</span>
<span class="c1">// |165val_165|</span>
<span class="c1">// +-----------+</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/scala/org/apache/spark/examples/sql/SQLDataSourceExample.scala" in the Spark repo.</small></div>
</div>
<div data-lang="java">
<div class="highlight"><pre class="codehilite"><code><span class="kn">import</span> <span class="nn">org.apache.spark.sql.Dataset</span><span class="o">;</span>
<span class="kn">import</span> <span class="nn">org.apache.spark.sql.Row</span><span class="o">;</span>
<span class="c1">// A CSV dataset is pointed to by path.</span>
<span class="c1">// The path can be either a single CSV file or a directory of CSV files</span>
<span class="nc">String</span> <span class="n">path</span> <span class="o">=</span> <span class="s">"examples/src/main/resources/people.csv"</span><span class="o">;</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">df</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="na">read</span><span class="o">().</span><span class="na">csv</span><span class="o">(</span><span class="n">path</span><span class="o">);</span>
<span class="n">df</span><span class="o">.</span><span class="na">show</span><span class="o">();</span>
<span class="c1">// +------------------+</span>
<span class="c1">// | _c0|</span>
<span class="c1">// +------------------+</span>
<span class="c1">// | name;age;job|</span>
<span class="c1">// |Jorge;30;Developer|</span>
<span class="c1">// | Bob;32;Developer|</span>
<span class="c1">// +------------------+</span>
<span class="c1">// Read a csv with delimiter, the default delimiter is ","</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">df2</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="na">read</span><span class="o">().</span><span class="na">option</span><span class="o">(</span><span class="s">"delimiter"</span><span class="o">,</span> <span class="s">";"</span><span class="o">).</span><span class="na">csv</span><span class="o">(</span><span class="n">path</span><span class="o">);</span>
<span class="n">df2</span><span class="o">.</span><span class="na">show</span><span class="o">();</span>
<span class="c1">// +-----+---+---------+</span>
<span class="c1">// | _c0|_c1| _c2|</span>
<span class="c1">// +-----+---+---------+</span>
<span class="c1">// | name|age| job|</span>
<span class="c1">// |Jorge| 30|Developer|</span>
<span class="c1">// | Bob| 32|Developer|</span>
<span class="c1">// +-----+---+---------+</span>
<span class="c1">// Read a csv with delimiter and a header</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">df3</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="na">read</span><span class="o">().</span><span class="na">option</span><span class="o">(</span><span class="s">"delimiter"</span><span class="o">,</span> <span class="s">";"</span><span class="o">).</span><span class="na">option</span><span class="o">(</span><span class="s">"header"</span><span class="o">,</span> <span class="s">"true"</span><span class="o">).</span><span class="na">csv</span><span class="o">(</span><span class="n">path</span><span class="o">);</span>
<span class="n">df3</span><span class="o">.</span><span class="na">show</span><span class="o">();</span>
<span class="c1">// +-----+---+---------+</span>
<span class="c1">// | name|age| job|</span>
<span class="c1">// +-----+---+---------+</span>
<span class="c1">// |Jorge| 30|Developer|</span>
<span class="c1">// | Bob| 32|Developer|</span>
<span class="c1">// +-----+---+---------+</span>
<span class="c1">// You can also use options() to use multiple options</span>
<span class="n">java</span><span class="o">.</span><span class="na">util</span><span class="o">.</span><span class="na">Map</span><span class="o">&lt;</span><span class="nc">String</span><span class="o">,</span> <span class="nc">String</span><span class="o">&gt;</span> <span class="n">optionsMap</span> <span class="o">=</span> <span class="k">new</span> <span class="n">java</span><span class="o">.</span><span class="na">util</span><span class="o">.</span><span class="na">HashMap</span><span class="o">&lt;</span><span class="nc">String</span><span class="o">,</span> <span class="nc">String</span><span class="o">&gt;();</span>
<span class="n">optionsMap</span><span class="o">.</span><span class="na">put</span><span class="o">(</span><span class="s">"delimiter"</span><span class="o">,</span><span class="s">";"</span><span class="o">);</span>
<span class="n">optionsMap</span><span class="o">.</span><span class="na">put</span><span class="o">(</span><span class="s">"header"</span><span class="o">,</span><span class="s">"true"</span><span class="o">);</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">df4</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="na">read</span><span class="o">().</span><span class="na">options</span><span class="o">(</span><span class="n">optionsMap</span><span class="o">).</span><span class="na">csv</span><span class="o">(</span><span class="n">path</span><span class="o">);</span>
<span class="c1">// "output" is a folder which contains multiple csv files and a _SUCCESS file.</span>
<span class="n">df3</span><span class="o">.</span><span class="na">write</span><span class="o">().</span><span class="na">csv</span><span class="o">(</span><span class="s">"output"</span><span class="o">);</span>
<span class="c1">// Read all files in a folder, please make sure only CSV files should present in the folder.</span>
<span class="nc">String</span> <span class="n">folderPath</span> <span class="o">=</span> <span class="s">"examples/src/main/resources"</span><span class="o">;</span>
<span class="nc">Dataset</span><span class="o">&lt;</span><span class="nc">Row</span><span class="o">&gt;</span> <span class="n">df5</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="na">read</span><span class="o">().</span><span class="na">csv</span><span class="o">(</span><span class="n">folderPath</span><span class="o">);</span>
<span class="n">df5</span><span class="o">.</span><span class="na">show</span><span class="o">();</span>
<span class="c1">// Wrong schema because non-CSV files are read</span>
<span class="c1">// +-----------+</span>
<span class="c1">// | _c0|</span>
<span class="c1">// +-----------+</span>
<span class="c1">// |238val_238|</span>
<span class="c1">// | 86val_86|</span>
<span class="c1">// |311val_311|</span>
<span class="c1">// | 27val_27|</span>
<span class="c1">// |165val_165|</span>
<span class="c1">// +-----------+</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java" in the Spark repo.</small></div>
</div>
<div data-lang="python">
<div class="highlight"><pre class="codehilite"><code><span class="c1"># spark is from the previous example
</span><span class="n">sc</span> <span class="o">=</span> <span class="n">spark</span><span class="p">.</span><span class="n">sparkContext</span>
<span class="c1"># A CSV dataset is pointed to by path.
# The path can be either a single CSV file or a directory of CSV files
</span><span class="n">path</span> <span class="o">=</span> <span class="s">"examples/src/main/resources/people.csv"</span>
<span class="n">df</span> <span class="o">=</span> <span class="n">spark</span><span class="p">.</span><span class="n">read</span><span class="p">.</span><span class="n">csv</span><span class="p">(</span><span class="n">path</span><span class="p">)</span>
<span class="n">df</span><span class="p">.</span><span class="n">show</span><span class="p">()</span>
<span class="c1"># +------------------+
# | _c0|
# +------------------+
# | name;age;job|
# |Jorge;30;Developer|
# | Bob;32;Developer|
# +------------------+
</span>
<span class="c1"># Read a csv with delimiter, the default delimiter is ","
</span><span class="n">df2</span> <span class="o">=</span> <span class="n">spark</span><span class="p">.</span><span class="n">read</span><span class="p">.</span><span class="n">option</span><span class="p">(</span><span class="s">"delimiter"</span><span class="p">,</span> <span class="s">";"</span><span class="p">).</span><span class="n">csv</span><span class="p">(</span><span class="n">path</span><span class="p">)</span>
<span class="n">df2</span><span class="p">.</span><span class="n">show</span><span class="p">()</span>
<span class="c1"># +-----+---+---------+
# | _c0|_c1| _c2|
# +-----+---+---------+
# | name|age| job|
# |Jorge| 30|Developer|
# | Bob| 32|Developer|
# +-----+---+---------+
</span>
<span class="c1"># Read a csv with delimiter and a header
</span><span class="n">df3</span> <span class="o">=</span> <span class="n">spark</span><span class="p">.</span><span class="n">read</span><span class="p">.</span><span class="n">option</span><span class="p">(</span><span class="s">"delimiter"</span><span class="p">,</span> <span class="s">";"</span><span class="p">).</span><span class="n">option</span><span class="p">(</span><span class="s">"header"</span><span class="p">,</span> <span class="bp">True</span><span class="p">).</span><span class="n">csv</span><span class="p">(</span><span class="n">path</span><span class="p">)</span>
<span class="n">df3</span><span class="p">.</span><span class="n">show</span><span class="p">()</span>
<span class="c1"># +-----+---+---------+
# | name|age| job|
# +-----+---+---------+
# |Jorge| 30|Developer|
# | Bob| 32|Developer|
# +-----+---+---------+
</span>
<span class="c1"># You can also use options() to use multiple options
</span><span class="n">df4</span> <span class="o">=</span> <span class="n">spark</span><span class="p">.</span><span class="n">read</span><span class="p">.</span><span class="n">options</span><span class="p">(</span><span class="n">delimiter</span><span class="o">=</span><span class="s">";"</span><span class="p">,</span> <span class="n">header</span><span class="o">=</span><span class="bp">True</span><span class="p">).</span><span class="n">csv</span><span class="p">(</span><span class="n">path</span><span class="p">)</span>
<span class="c1"># "output" is a folder which contains multiple csv files and a _SUCCESS file.
</span><span class="n">df3</span><span class="p">.</span><span class="n">write</span><span class="p">.</span><span class="n">csv</span><span class="p">(</span><span class="s">"output"</span><span class="p">)</span>
<span class="c1"># Read all files in a folder, please make sure only CSV files should present in the folder.
</span><span class="n">folderPath</span> <span class="o">=</span> <span class="s">"examples/src/main/resources"</span>
<span class="n">df5</span> <span class="o">=</span> <span class="n">spark</span><span class="p">.</span><span class="n">read</span><span class="p">.</span><span class="n">csv</span><span class="p">(</span><span class="n">folderPath</span><span class="p">)</span>
<span class="n">df5</span><span class="p">.</span><span class="n">show</span><span class="p">()</span>
<span class="c1"># Wrong schema because non-CSV files are read
# +-----------+
# | _c0|
# +-----------+
# |238val_238|
# | 86val_86|
# |311val_311|
# | 27val_27|
# |165val_165|
# +-----------+</span></code></pre></div>
<div><small>Find full example code at "examples/src/main/python/sql/datasource.py" in the Spark repo.</small></div>
</div>
</div>
<h2 id="data-source-option">Data Source Option</h2>
<p>Data source options of CSV can be set via:</p>
<ul>
<li>the <code class="language-plaintext highlighter-rouge">.option</code>/<code class="language-plaintext highlighter-rouge">.options</code> methods of
<ul>
<li><code class="language-plaintext highlighter-rouge">DataFrameReader</code></li>
<li><code class="language-plaintext highlighter-rouge">DataFrameWriter</code></li>
<li><code class="language-plaintext highlighter-rouge">DataStreamReader</code></li>
<li><code class="language-plaintext highlighter-rouge">DataStreamWriter</code></li>
</ul>
</li>
<li>the built-in functions below
<ul>
<li><code class="language-plaintext highlighter-rouge">from_csv</code></li>
<li><code class="language-plaintext highlighter-rouge">to_csv</code></li>
<li><code class="language-plaintext highlighter-rouge">schema_of_csv</code></li>
</ul>
</li>
<li><code class="language-plaintext highlighter-rouge">OPTIONS</code> clause at <a href="sql-ref-syntax-ddl-create-table-datasource.html">CREATE TABLE USING DATA_SOURCE</a></li>
</ul>
<table class="table">
<tr><th><b>Property Name</b></th><th><b>Default</b></th><th><b>Meaning</b></th><th><b>Scope</b></th></tr>
<tr>
<td><code>sep</code></td>
<td>,</td>
<td>Sets a separator for each field and value. This separator can be one or more characters.</td>
<td>read/write</td>
</tr>
<tr>
<td><code>encoding</code></td>
<td>UTF-8</td>
<td>For reading, decodes the CSV files by the given encoding type. For writing, specifies encoding (charset) of saved CSV files. CSV built-in functions ignore this option.</td>
<td>read/write</td>
</tr>
<tr>
<td><code>quote</code></td>
<td>"</td>
<td>Sets a single character used for escaping quoted values where the separator can be part of the value. For reading, to turn off quoting, set this to an empty string rather than <code>null</code>. For writing, if an empty string is set, it uses <code>\u0000</code> (the null character).</td>
<td>read/write</td>
</tr>
<tr>
<td><code>quoteAll</code></td>
<td>false</td>
<td>A flag indicating whether all values should always be enclosed in quotes. The default is to quote only values containing a quote character.</td>
<td>write</td>
</tr>
<tr>
<td><code>escape</code></td>
<td>\</td>
<td>Sets a single character used for escaping quotes inside an already quoted value.</td>
<td>read/write</td>
</tr>
<tr>
<td><code>escapeQuotes</code></td>
<td>true</td>
<td>A flag indicating whether values containing quotes should always be enclosed in quotes. Default is to escape all values containing a quote character.</td>
<td>write</td>
</tr>
<tr>
<td><code>comment</code></td>
<td></td>
<td>Sets a single character used for skipping lines beginning with this character. By default, it is disabled.</td>
<td>read</td>
</tr>
<tr>
<td><code>header</code></td>
<td>false</td>
<td>For reading, uses the first line as the names of columns. For writing, writes the names of columns as the first line. Note that if the given path is an RDD of Strings, this header option removes all lines that match the header, if one exists. CSV built-in functions ignore this option.</td>
<td>read/write</td>
</tr>
<tr>
<td><code>inferSchema</code></td>
<td>false</td>
<td>Infers the input schema automatically from data. It requires one extra pass over the data. CSV built-in functions ignore this option.</td>
<td>read</td>
</tr>
<tr>
<td><code>enforceSchema</code></td>
<td>true</td>
<td>If it is set to <code>true</code>, the specified or inferred schema will be forcibly applied to datasource files, and headers in CSV files will be ignored. If the option is set to <code>false</code>, the schema will be validated against all headers in CSV files when the <code>header</code> option is set to <code>true</code>. Field names in the schema and column names in CSV headers are checked by their positions, taking <code>spark.sql.caseSensitive</code> into account. Although the default value is <code>true</code>, it is recommended to disable the <code>enforceSchema</code> option to avoid incorrect results. CSV built-in functions ignore this option.</td>
<td>read</td>
</tr>
<tr>
<td><code>ignoreLeadingWhiteSpace</code></td>
<td><code>false</code> (for reading), <code>true</code> (for writing)</td>
<td>A flag indicating whether or not leading whitespaces from values being read/written should be skipped.</td>
<td>read/write</td>
</tr>
<tr>
<td><code>ignoreTrailingWhiteSpace</code></td>
<td><code>false</code> (for reading), <code>true</code> (for writing)</td>
<td>A flag indicating whether or not trailing whitespaces from values being read/written should be skipped.</td>
<td>read/write</td>
</tr>
<tr>
<td><code>nullValue</code></td>
<td></td>
<td>Sets the string representation of a null value. Since Spark 2.0.1, this <code>nullValue</code> parameter applies to all supported types, including the string type.</td>
<td>read/write</td>
</tr>
<tr>
<td><code>nanValue</code></td>
<td>NaN</td>
<td>Sets the string representation of a non-number (NaN) value.</td>
<td>read</td>
</tr>
<tr>
<td><code>positiveInf</code></td>
<td>Inf</td>
<td>Sets the string representation of a positive infinity value.</td>
<td>read</td>
</tr>
<tr>
<td><code>negativeInf</code></td>
<td>-Inf</td>
<td>Sets the string representation of a negative infinity value.</td>
<td>read</td>
</tr>
<tr>
<td><code>dateFormat</code></td>
<td>yyyy-MM-dd</td>
<td>Sets the string that indicates a date format. Custom date formats follow the formats at <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">Datetime Patterns</a>. This applies to date type.</td>
<td>read/write</td>
</tr>
<tr>
<td><code>timestampFormat</code></td>
<td>yyyy-MM-dd'T'HH:mm:ss[.SSS][XXX]</td>
<td>Sets the string that indicates a timestamp format. Custom date formats follow the formats at <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">Datetime Patterns</a>. This applies to timestamp type.</td>
<td>read/write</td>
</tr>
<tr>
<td><code>timestampNTZFormat</code></td>
<td>yyyy-MM-dd'T'HH:mm:ss[.SSS]</td>
<td>Sets the string that indicates a timestamp-without-timezone format. Custom date formats follow the formats at <a href="https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html">Datetime Patterns</a>. This applies to the timestamp without timezone type. Note that zone-offset and time-zone components are not supported when reading or writing this data type.</td>
<td>read/write</td>
</tr>
<tr>
<td><code>maxColumns</code></td>
<td>20480</td>
<td>Defines a hard limit of how many columns a record can have.</td>
<td>read</td>
</tr>
<tr>
<td><code>maxCharsPerColumn</code></td>
<td>-1</td>
<td>Defines the maximum number of characters allowed for any given value being read. By default, it is -1, meaning unlimited length.</td>
<td>read</td>
</tr>
<tr>
<td><code>mode</code></td>
<td>PERMISSIVE</td>
<td>Sets a mode for dealing with corrupt records during parsing. It supports the following case-insensitive modes. Note that Spark tries to parse only the required columns in CSV under column pruning, so corrupt records can differ depending on the required set of fields. This behavior can be controlled by <code>spark.sql.csv.parser.columnPruning.enabled</code> (enabled by default).<br />
<ul>
<li><code>PERMISSIVE</code>: when it meets a corrupted record, puts the malformed string into a field configured by <code>columnNameOfCorruptRecord</code>, and sets malformed fields to <code>null</code>. To keep corrupt records, a user can set a string type field named <code>columnNameOfCorruptRecord</code> in a user-defined schema. If a schema does not have the field, it drops corrupt records during parsing. A record with fewer or more tokens than the schema is not a corrupted record to CSV. When it meets a record having fewer tokens than the length of the schema, it sets <code>null</code> on the extra fields. When the record has more tokens than the length of the schema, it drops the extra tokens.</li>
<li><code>DROPMALFORMED</code>: ignores the whole corrupted records. This mode is unsupported in the CSV built-in functions.</li>
<li><code>FAILFAST</code>: throws an exception when it meets corrupted records.</li>
</ul>
</td>
<td>read</td>
</tr>
<tr>
<td><code>columnNameOfCorruptRecord</code></td>
<td>(value of <code>spark.sql.columnNameOfCorruptRecord</code> configuration)</td>
<td>Allows renaming the new field that holds the malformed string created by <code>PERMISSIVE</code> mode. This overrides <code>spark.sql.columnNameOfCorruptRecord</code>.</td>
<td>read</td>
</tr>
<tr>
<td><code>multiLine</code></td>
<td>false</td>
<td>Parse one record, which may span multiple lines, per file. CSV built-in functions ignore this option.</td>
<td>read</td>
</tr>
<tr>
<td><code>charToEscapeQuoteEscaping</code></td>
<td><code>escape</code> or <code>\0</code></td>
<td>Sets a single character used for escaping the escape for the quote character. The default value is the escape character when the escape and quote characters are different, and <code>\0</code> otherwise.</td>
<td>read/write</td>
</tr>
<tr>
<td><code>samplingRatio</code></td>
<td>1.0</td>
<td>Defines the fraction of rows used for schema inference. CSV built-in functions ignore this option.</td>
<td>read</td>
</tr>
<tr>
<td><code>emptyValue</code></td>
<td>empty string (for reading), <code>""</code> (for writing)</td>
<td>Sets the string representation of an empty value.</td>
<td>read/write</td>
</tr>
<tr>
<td><code>locale</code></td>
<td>en-US</td>
<td>Sets a locale as a language tag in IETF BCP 47 format. For instance, this is used while parsing dates and timestamps.</td>
<td>read</td>
</tr>
<tr>
<td><code>lineSep</code></td>
<td><code>\r</code>, <code>\r\n</code> and <code>\n</code> (for reading), <code>\n</code> (for writing)</td>
<td>Defines the line separator that should be used for parsing/writing. Maximum length is 1 character. CSV built-in functions ignore this option.</td>
<td>read/write</td>
</tr>
<tr>
<td><code>unescapedQuoteHandling</code></td>
<td>STOP_AT_DELIMITER</td>
<td>Defines how the CsvParser will handle values with unescaped quotes.<br />
<ul>
<li><code>STOP_AT_CLOSING_QUOTE</code>: If unescaped quotes are found in the input, accumulate the quote character and proceed parsing the value as a quoted value, until a closing quote is found.</li>
<li><code>BACK_TO_DELIMITER</code>: If unescaped quotes are found in the input, consider the value as an unquoted value. This will make the parser accumulate all characters of the current parsed value until the delimiter is found. If no delimiter is found in the value, the parser will continue accumulating characters from the input until a delimiter or line ending is found.</li>
<li><code>STOP_AT_DELIMITER</code>: If unescaped quotes are found in the input, consider the value as an unquoted value. This will make the parser accumulate all characters until the delimiter or a line ending is found in the input.</li>
<li><code>SKIP_VALUE</code>: If unescaped quotes are found in the input, the content parsed for the given value will be skipped and the value set in <code>nullValue</code> will be produced instead.</li>
<li><code>RAISE_ERROR</code>: If unescaped quotes are found in the input, a TextParsingException will be thrown.</li>
</ul>
</td>
<td>read</td>
</tr>
<tr>
<td><code>compression</code></td>
<td>(none)</td>
<td>Compression codec to use when saving to file. This can be one of the known case-insensitive shortened names (<code>none</code>, <code>bzip2</code>, <code>gzip</code>, <code>lz4</code>, <code>snappy</code> and <code>deflate</code>). CSV built-in functions ignore this option.</td>
<td>write</td>
</tr>
</table>
<p>Other generic options can be found in <a href="https://spark.apache.org/docs/latest/sql-data-sources-generic-options.html">Generic File Source Options</a>.</p>
</div>
<!-- /container -->
</div>
<script src="js/vendor/jquery-3.5.1.min.js"></script>
<script src="js/vendor/bootstrap.bundle.min.js"></script>
<script src="js/vendor/anchor.min.js"></script>
<script src="js/main.js"></script>
<script type="text/javascript" src="https://cdn.jsdelivr.net/npm/docsearch.js@2/dist/cdn/docsearch.min.js"></script>
<script type="text/javascript">
// DocSearch is entirely free and automated. DocSearch is built in two parts:
// 1. a crawler which we run on our own infrastructure every 24 hours. It follows every link
// in your website and extracts content from every page it traverses. It then pushes this
// content to an Algolia index.
// 2. a JavaScript snippet to be inserted in your website that will bind this Algolia index
// to your search input and display its results in a dropdown UI. If you want more
// details on how DocSearch works, check the DocSearch docs.
docsearch({
apiKey: 'd62f962a82bc9abb53471cb7b89da35e',
appId: 'RAI69RXRSK',
indexName: 'apache_spark',
inputSelector: '#docsearch-input',
enhancedSearchInput: true,
algoliaOptions: {
'facetFilters': ["version:3.3.4"]
},
debug: false // Set debug to true if you want to inspect the dropdown
});
</script>
<!-- MathJax Section -->
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
TeX: { equationNumbers: { autoNumber: "AMS" } }
});
</script>
<script>
// Note that we load MathJax this way to work with local file (file://), HTTP and HTTPS.
// We could use "//cdn.mathjax...", but that won't support "file://".
(function(d, script) {
script = d.createElement('script');
script.type = 'text/javascript';
script.async = true;
script.onload = function(){
MathJax.Hub.Config({
tex2jax: {
inlineMath: [ ["$", "$"], ["\\\\(","\\\\)"] ],
displayMath: [ ["$$","$$"], ["\\[", "\\]"] ],
processEscapes: true,
skipTags: ['script', 'noscript', 'style', 'textarea', 'pre']
}
});
};
script.src = ('https:' == document.location.protocol ? 'https://' : 'http://') +
'cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js' +
'?config=TeX-AMS-MML_HTMLorMML';
d.getElementsByTagName('head')[0].appendChild(script);
}(document));
</script>
</body>
</html>