Source code for pyspark.sql.readwriter (PySpark 3.1.1 documentation)

#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys

from py4j.java_gateway import JavaClass

from pyspark import RDD, since
from pyspark.sql.column import _to_seq, _to_java_column
from pyspark.sql.types import StructType
from pyspark.sql import utils
from pyspark.sql.utils import to_str

__all__ = ["DataFrameReader", "DataFrameWriter"]

| <span class="k">class</span> <span class="nc">OptionUtils</span><span class="p">(</span><span class="nb">object</span><span class="p">):</span> |
| |
| <span class="k">def</span> <span class="nf">_set_opts</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">schema</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="o">**</span><span class="n">options</span><span class="p">):</span> |
| <span class="sd">"""</span> |
| <span class="sd"> Set named options (filter out those the value is None)</span> |
| <span class="sd"> """</span> |
| <span class="k">if</span> <span class="n">schema</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">schema</span><span class="p">(</span><span class="n">schema</span><span class="p">)</span> |
| <span class="k">for</span> <span class="n">k</span><span class="p">,</span> <span class="n">v</span> <span class="ow">in</span> <span class="n">options</span><span class="o">.</span><span class="n">items</span><span class="p">():</span> |
| <span class="k">if</span> <span class="n">v</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">option</span><span class="p">(</span><span class="n">k</span><span class="p">,</span> <span class="n">v</span><span class="p">)</span> |
| |
| |
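# A minimal usage sketch of ``_set_opts`` (not part of the original source):
# any keyword whose value is None is silently skipped, so callers such as
# ``DataFrameReader.json`` below can forward every parameter unconditionally.
#
#     reader._set_opts(schema=None, multiLine=True, lineSep=None)
#     # only reader.option('multiLine', True) is invoked; the None-valued
#     # entries are filtered out before reaching the underlying JVM reader.
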
| <span class="k">class</span> <span class="nc">DataFrameReader</span><span class="p">(</span><span class="n">OptionUtils</span><span class="p">):</span> |
| <span class="sd">"""</span> |
| <span class="sd"> Interface used to load a :class:`DataFrame` from external storage systems</span> |
| <span class="sd"> (e.g. file systems, key-value stores, etc). Use :attr:`SparkSession.read`</span> |
| <span class="sd"> to access this.</span> |
| |
| <span class="sd"> .. versionadded:: 1.4</span> |
| <span class="sd"> """</span> |
| |
| <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">spark</span><span class="p">):</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="n">_ssql_ctx</span><span class="o">.</span><span class="n">read</span><span class="p">()</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_spark</span> <span class="o">=</span> <span class="n">spark</span> |
| |
| <span class="k">def</span> <span class="nf">_df</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">jdf</span><span class="p">):</span> |
| <span class="kn">from</span> <span class="nn">pyspark.sql.dataframe</span> <span class="kn">import</span> <span class="n">DataFrame</span> |
| <span class="k">return</span> <span class="n">DataFrame</span><span class="p">(</span><span class="n">jdf</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">_spark</span><span class="p">)</span> |
| |
| <div class="viewcode-block" id="DataFrameReader.format"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameReader.format.html#pyspark.sql.DataFrameReader.format">[docs]</a> <span class="k">def</span> <span class="nf">format</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">source</span><span class="p">):</span> |
| <span class="sd">"""Specifies the input data source format.</span> |
| |
| <span class="sd"> .. versionadded:: 1.4.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> source : str</span> |
| <span class="sd"> string, name of the data source, e.g. 'json', 'parquet'.</span> |
| |
| <span class="sd"> Examples</span> |
| <span class="sd"> --------</span> |
| <span class="sd"> >>> df = spark.read.format('json').load('python/test_support/sql/people.json')</span> |
| <span class="sd"> >>> df.dtypes</span> |
| <span class="sd"> [('age', 'bigint'), ('name', 'string')]</span> |
| |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">source</span><span class="p">)</span> |
| <span class="k">return</span> <span class="bp">self</span></div> |
| |
| <div class="viewcode-block" id="DataFrameReader.schema"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameReader.schema.html#pyspark.sql.DataFrameReader.schema">[docs]</a> <span class="k">def</span> <span class="nf">schema</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">schema</span><span class="p">):</span> |
| <span class="sd">"""Specifies the input schema.</span> |
| |
| <span class="sd"> Some data sources (e.g. JSON) can infer the input schema automatically from data.</span> |
| <span class="sd"> By specifying the schema here, the underlying data source can skip the schema</span> |
| <span class="sd"> inference step, and thus speed up data loading.</span> |
| |
| <span class="sd"> .. versionadded:: 1.4.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> schema : :class:`pyspark.sql.types.StructType` or str</span> |
| <span class="sd"> a :class:`pyspark.sql.types.StructType` object or a DDL-formatted string</span> |
| <span class="sd"> (For example ``col0 INT, col1 DOUBLE``).</span> |
| |
| <span class="sd"> >>> s = spark.read.schema("col0 INT, col1 DOUBLE")</span> |
| <span class="sd"> """</span> |
| <span class="kn">from</span> <span class="nn">pyspark.sql</span> <span class="kn">import</span> <span class="n">SparkSession</span> |
| <span class="n">spark</span> <span class="o">=</span> <span class="n">SparkSession</span><span class="o">.</span><span class="n">builder</span><span class="o">.</span><span class="n">getOrCreate</span><span class="p">()</span> |
| <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">schema</span><span class="p">,</span> <span class="n">StructType</span><span class="p">):</span> |
| <span class="n">jschema</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="n">_jsparkSession</span><span class="o">.</span><span class="n">parseDataType</span><span class="p">(</span><span class="n">schema</span><span class="o">.</span><span class="n">json</span><span class="p">())</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span><span class="o">.</span><span class="n">schema</span><span class="p">(</span><span class="n">jschema</span><span class="p">)</span> |
| <span class="k">elif</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">schema</span><span class="p">,</span> <span class="nb">str</span><span class="p">):</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span><span class="o">.</span><span class="n">schema</span><span class="p">(</span><span class="n">schema</span><span class="p">)</span> |
| <span class="k">else</span><span class="p">:</span> |
| <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="s2">"schema should be StructType or string"</span><span class="p">)</span> |
| <span class="k">return</span> <span class="bp">self</span></div> |
| |
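    # A sketch of the equivalent ``StructType`` form of the DDL string above,
    # assuming a running ``spark`` session (the column names are illustrative):
    #
    #     from pyspark.sql.types import StructType, StructField, IntegerType, DoubleType
    #     s = spark.read.schema(StructType([
    #         StructField("col0", IntegerType()),
    #         StructField("col1", DoubleType()),
    #     ]))
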
| <div class="viewcode-block" id="DataFrameReader.option"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameReader.option.html#pyspark.sql.DataFrameReader.option">[docs]</a> <span class="nd">@since</span><span class="p">(</span><span class="mf">1.5</span><span class="p">)</span> |
| <span class="k">def</span> <span class="nf">option</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">key</span><span class="p">,</span> <span class="n">value</span><span class="p">):</span> |
| <span class="sd">"""Adds an input option for the underlying data source.</span> |
| |
| <span class="sd"> You can set the following option(s) for reading files:</span> |
| <span class="sd"> * ``timeZone``: sets the string that indicates a time zone ID to be used to parse</span> |
| <span class="sd"> timestamps in the JSON/CSV datasources or partition values. The following</span> |
| <span class="sd"> formats of `timeZone` are supported:</span> |
| |
| <span class="sd"> * Region-based zone ID: It should have the form 'area/city', such as \</span> |
| <span class="sd"> 'America/Los_Angeles'.</span> |
| <span class="sd"> * Zone offset: It should be in the format '(+|-)HH:mm', for example '-08:00' or \</span> |
| <span class="sd"> '+01:00'. Also 'UTC' and 'Z' are supported as aliases of '+00:00'.</span> |
| |
| <span class="sd"> Other short names like 'CST' are not recommended to use because they can be</span> |
| <span class="sd"> ambiguous. If it isn't set, the current value of the SQL config</span> |
| <span class="sd"> ``spark.sql.session.timeZone`` is used by default.</span> |
| <span class="sd"> * ``pathGlobFilter``: an optional glob pattern to only include files with paths matching</span> |
| <span class="sd"> the pattern. The syntax follows org.apache.hadoop.fs.GlobFilter.</span> |
| <span class="sd"> It does not change the behavior of partition discovery.</span> |
| <span class="sd"> * ``modifiedBefore``: an optional timestamp to only include files with</span> |
| <span class="sd"> modification times occurring before the specified time. The provided timestamp</span> |
| <span class="sd"> must be in the following format: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</span> |
| <span class="sd"> * ``modifiedAfter``: an optional timestamp to only include files with</span> |
| <span class="sd"> modification times occurring after the specified time. The provided timestamp</span> |
| <span class="sd"> must be in the following format: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span><span class="o">.</span><span class="n">option</span><span class="p">(</span><span class="n">key</span><span class="p">,</span> <span class="n">to_str</span><span class="p">(</span><span class="n">value</span><span class="p">))</span> |
| <span class="k">return</span> <span class="bp">self</span></div> |
| |
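    # A sketch of a typical ``option`` chain, assuming a running ``spark``
    # session; the ``data/`` directory is hypothetical:
    #
    #     df = (spark.read
    #           .option("timeZone", "America/Los_Angeles")
    #           .option("pathGlobFilter", "*.json")
    #           .format("json")
    #           .load("data/"))
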
| <div class="viewcode-block" id="DataFrameReader.options"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameReader.options.html#pyspark.sql.DataFrameReader.options">[docs]</a> <span class="nd">@since</span><span class="p">(</span><span class="mf">1.4</span><span class="p">)</span> |
| <span class="k">def</span> <span class="nf">options</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">**</span><span class="n">options</span><span class="p">):</span> |
| <span class="sd">"""Adds input options for the underlying data source.</span> |
| |
| <span class="sd"> You can set the following option(s) for reading files:</span> |
| <span class="sd"> * ``timeZone``: sets the string that indicates a time zone ID to be used to parse</span> |
| <span class="sd"> timestamps in the JSON/CSV datasources or partition values. The following</span> |
| <span class="sd"> formats of `timeZone` are supported:</span> |
| |
| <span class="sd"> * Region-based zone ID: It should have the form 'area/city', such as \</span> |
| <span class="sd"> 'America/Los_Angeles'.</span> |
| <span class="sd"> * Zone offset: It should be in the format '(+|-)HH:mm', for example '-08:00' or \</span> |
| <span class="sd"> '+01:00'. Also 'UTC' and 'Z' are supported as aliases of '+00:00'.</span> |
| |
| <span class="sd"> Other short names like 'CST' are not recommended to use because they can be</span> |
| <span class="sd"> ambiguous. If it isn't set, the current value of the SQL config</span> |
| <span class="sd"> ``spark.sql.session.timeZone`` is used by default.</span> |
| <span class="sd"> * ``pathGlobFilter``: an optional glob pattern to only include files with paths matching</span> |
| <span class="sd"> the pattern. The syntax follows org.apache.hadoop.fs.GlobFilter.</span> |
| <span class="sd"> It does not change the behavior of partition discovery.</span> |
| <span class="sd"> * ``modifiedBefore``: an optional timestamp to only include files with</span> |
| <span class="sd"> modification times occurring before the specified time. The provided timestamp</span> |
| <span class="sd"> must be in the following format: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</span> |
| <span class="sd"> * ``modifiedAfter``: an optional timestamp to only include files with</span> |
| <span class="sd"> modification times occurring after the specified time. The provided timestamp</span> |
| <span class="sd"> must be in the following format: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</span> |
| <span class="sd"> """</span> |
| <span class="k">for</span> <span class="n">k</span> <span class="ow">in</span> <span class="n">options</span><span class="p">:</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span><span class="o">.</span><span class="n">option</span><span class="p">(</span><span class="n">k</span><span class="p">,</span> <span class="n">to_str</span><span class="p">(</span><span class="n">options</span><span class="p">[</span><span class="n">k</span><span class="p">]))</span> |
| <span class="k">return</span> <span class="bp">self</span></div> |
| |
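    # The same options can be supplied in a single call via keyword arguments;
    # a sketch with a hypothetical ``logs/`` directory:
    #
    #     df = (spark.read
    #           .options(timeZone="UTC", modifiedAfter="2020-06-01T13:00:00")
    #           .format("json")
    #           .load("logs/"))
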
| <div class="viewcode-block" id="DataFrameReader.load"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameReader.load.html#pyspark.sql.DataFrameReader.load">[docs]</a> <span class="k">def</span> <span class="nf">load</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">path</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="nb">format</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">schema</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="o">**</span><span class="n">options</span><span class="p">):</span> |
| <span class="sd">"""Loads data from a data source and returns it as a :class:`DataFrame`.</span> |
| |
| <span class="sd"> .. versionadded:: 1.4.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> path : str or list, optional</span> |
| <span class="sd"> optional string or a list of string for file-system backed data sources.</span> |
| <span class="sd"> format : str, optional</span> |
| <span class="sd"> optional string for format of the data source. Default to 'parquet'.</span> |
| <span class="sd"> schema : :class:`pyspark.sql.types.StructType` or str, optional</span> |
| <span class="sd"> optional :class:`pyspark.sql.types.StructType` for the input schema</span> |
| <span class="sd"> or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).</span> |
| <span class="sd"> **options : dict</span> |
| <span class="sd"> all other string options</span> |
| |
| <span class="sd"> Examples</span> |
| <span class="sd"> --------</span> |
| <span class="sd"> >>> df = spark.read.format("parquet").load('python/test_support/sql/parquet_partitioned',</span> |
| <span class="sd"> ... opt1=True, opt2=1, opt3='str')</span> |
| <span class="sd"> >>> df.dtypes</span> |
| <span class="sd"> [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]</span> |
| |
| <span class="sd"> >>> df = spark.read.format('json').load(['python/test_support/sql/people.json',</span> |
| <span class="sd"> ... 'python/test_support/sql/people1.json'])</span> |
| <span class="sd"> >>> df.dtypes</span> |
| <span class="sd"> [('age', 'bigint'), ('aka', 'string'), ('name', 'string')]</span> |
| <span class="sd"> """</span> |
| <span class="k">if</span> <span class="nb">format</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="nb">format</span><span class="p">)</span> |
| <span class="k">if</span> <span class="n">schema</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">schema</span><span class="p">(</span><span class="n">schema</span><span class="p">)</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">options</span><span class="p">(</span><span class="o">**</span><span class="n">options</span><span class="p">)</span> |
| <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">path</span><span class="p">,</span> <span class="nb">str</span><span class="p">):</span> |
| <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_df</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span><span class="o">.</span><span class="n">load</span><span class="p">(</span><span class="n">path</span><span class="p">))</span> |
| <span class="k">elif</span> <span class="n">path</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span> |
| <span class="k">if</span> <span class="nb">type</span><span class="p">(</span><span class="n">path</span><span class="p">)</span> <span class="o">!=</span> <span class="nb">list</span><span class="p">:</span> |
| <span class="n">path</span> <span class="o">=</span> <span class="p">[</span><span class="n">path</span><span class="p">]</span> |
| <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_df</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span><span class="o">.</span><span class="n">load</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_spark</span><span class="o">.</span><span class="n">_sc</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">PythonUtils</span><span class="o">.</span><span class="n">toSeq</span><span class="p">(</span><span class="n">path</span><span class="p">)))</span> |
| <span class="k">else</span><span class="p">:</span> |
| <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_df</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span><span class="o">.</span><span class="n">load</span><span class="p">())</span></div> |
| |
| <div class="viewcode-block" id="DataFrameReader.json"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameReader.json.html#pyspark.sql.DataFrameReader.json">[docs]</a> <span class="k">def</span> <span class="nf">json</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">path</span><span class="p">,</span> <span class="n">schema</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">primitivesAsString</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">prefersDecimal</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">allowComments</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">allowUnquotedFieldNames</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">allowSingleQuotes</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">allowNumericLeadingZero</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">allowBackslashEscapingAnyCharacter</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">mode</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">columnNameOfCorruptRecord</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">dateFormat</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">timestampFormat</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">multiLine</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">allowUnquotedControlChars</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">lineSep</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">samplingRatio</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">dropFieldIfAllNull</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">encoding</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">locale</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">pathGlobFilter</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">recursiveFileLookup</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">allowNonNumericNumbers</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">modifiedBefore</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">modifiedAfter</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span> |
| <span class="sd">"""</span> |
| <span class="sd"> Loads JSON files and returns the results as a :class:`DataFrame`.</span> |
| |
| <span class="sd"> `JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default.</span> |
| <span class="sd"> For JSON (one record per file), set the ``multiLine`` parameter to ``true``.</span> |
| |
| <span class="sd"> If the ``schema`` parameter is not specified, this function goes</span> |
| <span class="sd"> through the input once to determine the input schema.</span> |
| |
| <span class="sd"> .. versionadded:: 1.4.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> path : str, list or :class:`RDD`</span> |
| <span class="sd"> string represents path to the JSON dataset, or a list of paths,</span> |
| <span class="sd"> or RDD of Strings storing JSON objects.</span> |
| <span class="sd"> schema : :class:`pyspark.sql.types.StructType` or str, optional</span> |
| <span class="sd"> an optional :class:`pyspark.sql.types.StructType` for the input schema or</span> |
| <span class="sd"> a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).</span> |
| <span class="sd"> primitivesAsString : str or bool, optional</span> |
| <span class="sd"> infers all primitive values as a string type. If None is set,</span> |
| <span class="sd"> it uses the default value, ``false``.</span> |
| <span class="sd"> prefersDecimal : str or bool, optional</span> |
| <span class="sd"> infers all floating-point values as a decimal type. If the values</span> |
| <span class="sd"> do not fit in decimal, then it infers them as doubles. If None is</span> |
| <span class="sd"> set, it uses the default value, ``false``.</span> |
| <span class="sd"> allowComments : str or bool, optional</span> |
| <span class="sd"> ignores Java/C++ style comment in JSON records. If None is set,</span> |
| <span class="sd"> it uses the default value, ``false``.</span> |
| <span class="sd"> allowUnquotedFieldNames : str or bool, optional</span> |
| <span class="sd"> allows unquoted JSON field names. If None is set,</span> |
| <span class="sd"> it uses the default value, ``false``.</span> |
| <span class="sd"> allowSingleQuotes : str or bool, optional</span> |
| <span class="sd"> allows single quotes in addition to double quotes. If None is</span> |
| <span class="sd"> set, it uses the default value, ``true``.</span> |
| <span class="sd"> allowNumericLeadingZero : str or bool, optional</span> |
| <span class="sd"> allows leading zeros in numbers (e.g. 00012). If None is</span> |
| <span class="sd"> set, it uses the default value, ``false``.</span> |
| <span class="sd"> allowBackslashEscapingAnyCharacter : str or bool, optional</span> |
| <span class="sd"> allows accepting quoting of all character</span> |
| <span class="sd"> using backslash quoting mechanism. If None is</span> |
| <span class="sd"> set, it uses the default value, ``false``.</span> |
| <span class="sd"> mode : str, optional</span> |
| <span class="sd"> allows a mode for dealing with corrupt records during parsing. If None is</span> |
| <span class="sd"> set, it uses the default value, ``PERMISSIVE``.</span> |
| |
| <span class="sd"> * ``PERMISSIVE``: when it meets a corrupted record, puts the malformed string \</span> |
| <span class="sd"> into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \</span> |
| <span class="sd"> fields to ``null``. To keep corrupt records, an user can set a string type \</span> |
| <span class="sd"> field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \</span> |
| <span class="sd"> schema does not have the field, it drops corrupt records during parsing. \</span> |
| <span class="sd"> When inferring a schema, it implicitly adds a ``columnNameOfCorruptRecord`` \</span> |
| <span class="sd"> field in an output schema.</span> |
| <span class="sd"> * ``DROPMALFORMED``: ignores the whole corrupted records.</span> |
| <span class="sd"> * ``FAILFAST``: throws an exception when it meets corrupted records.</span> |
| |
| <span class="sd"> columnNameOfCorruptRecord: str, optional</span> |
| <span class="sd"> allows renaming the new field having malformed string</span> |
| <span class="sd"> created by ``PERMISSIVE`` mode. This overrides</span> |
| <span class="sd"> ``spark.sql.columnNameOfCorruptRecord``. If None is set,</span> |
| <span class="sd"> it uses the value specified in</span> |
| <span class="sd"> ``spark.sql.columnNameOfCorruptRecord``.</span> |
| <span class="sd"> dateFormat : str, optional</span> |
| <span class="sd"> sets the string that indicates a date format. Custom date formats</span> |
| <span class="sd"> follow the formats at</span> |
| <span class="sd"> `datetime pattern <https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html>`_. # noqa</span> |
| <span class="sd"> This applies to date type. If None is set, it uses the</span> |
| <span class="sd"> default value, ``yyyy-MM-dd``.</span> |
| <span class="sd"> timestampFormat : str, optional</span> |
| <span class="sd"> sets the string that indicates a timestamp format.</span> |
| <span class="sd"> Custom date formats follow the formats at</span> |
| <span class="sd"> `datetime pattern <https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html>`_. # noqa</span> |
| <span class="sd"> This applies to timestamp type. If None is set, it uses the</span> |
| <span class="sd"> default value, ``yyyy-MM-dd'T'HH:mm:ss[.SSS][XXX]``.</span> |
| <span class="sd"> multiLine : str or bool, optional</span> |
| <span class="sd"> parse one record, which may span multiple lines, per file. If None is</span> |
| <span class="sd"> set, it uses the default value, ``false``.</span> |
| <span class="sd"> allowUnquotedControlChars : str or bool, optional</span> |
| <span class="sd"> allows JSON Strings to contain unquoted control</span> |
| <span class="sd"> characters (ASCII characters with value less than 32,</span> |
| <span class="sd"> including tab and line feed characters) or not.</span> |
| <span class="sd"> encoding : str or bool, optional</span> |
| <span class="sd"> allows to forcibly set one of standard basic or extended encoding for</span> |
| <span class="sd"> the JSON files. For example UTF-16BE, UTF-32LE. If None is set,</span> |
| <span class="sd"> the encoding of input JSON will be detected automatically</span> |
| <span class="sd"> when the multiLine option is set to ``true``.</span> |
| <span class="sd"> lineSep : str, optional</span> |
| <span class="sd"> defines the line separator that should be used for parsing. If None is</span> |
| <span class="sd"> set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``.</span> |
| <span class="sd"> samplingRatio : str or float, optional</span> |
| <span class="sd"> defines fraction of input JSON objects used for schema inferring.</span> |
| <span class="sd"> If None is set, it uses the default value, ``1.0``.</span> |
| <span class="sd"> dropFieldIfAllNull : str or bool, optional</span> |
| <span class="sd"> whether to ignore column of all null values or empty</span> |
| <span class="sd"> array/struct during schema inference. If None is set, it</span> |
| <span class="sd"> uses the default value, ``false``.</span> |
| <span class="sd"> locale : str, optional</span> |
| <span class="sd"> sets a locale as language tag in IETF BCP 47 format. If None is set,</span> |
| <span class="sd"> it uses the default value, ``en-US``. For instance, ``locale`` is used while</span> |
| <span class="sd"> parsing dates and timestamps.</span> |
| <span class="sd"> pathGlobFilter : str or bool, optional</span> |
| <span class="sd"> an optional glob pattern to only include files with paths matching</span> |
| <span class="sd"> the pattern. The syntax follows `org.apache.hadoop.fs.GlobFilter`.</span> |
| <span class="sd"> It does not change the behavior of</span> |
| <span class="sd"> `partition discovery <https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#partition-discovery>`_. # noqa</span> |
| <span class="sd"> recursiveFileLookup : str or bool, optional</span> |
| <span class="sd"> recursively scan a directory for files. Using this option</span> |
| <span class="sd"> disables</span> |
| <span class="sd"> `partition discovery <https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#partition-discovery>`_. # noqa</span> |
| <span class="sd"> allowNonNumericNumbers : str or bool</span> |
| <span class="sd"> allows JSON parser to recognize set of "Not-a-Number" (NaN)</span> |
| <span class="sd"> tokens as legal floating number values. If None is set,</span> |
| <span class="sd"> it uses the default value, ``true``.</span> |
| |
| <span class="sd"> * ``+INF``: for positive infinity, as well as alias of</span> |
| <span class="sd"> ``+Infinity`` and ``Infinity``.</span> |
| <span class="sd"> * ``-INF``: for negative infinity, alias ``-Infinity``.</span> |
| <span class="sd"> * ``NaN``: for other not-a-numbers, like result of division by zero.</span> |
| <span class="sd"> modifiedBefore : an optional timestamp to only include files with</span> |
| <span class="sd"> modification times occurring before the specified time. The provided timestamp</span> |
| <span class="sd"> must be in the following format: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</span> |
| <span class="sd"> modifiedAfter : an optional timestamp to only include files with</span> |
| <span class="sd"> modification times occurring after the specified time. The provided timestamp</span> |
| <span class="sd"> must be in the following format: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</span> |
| |
| |
| <span class="sd"> Examples</span> |
| <span class="sd"> --------</span> |
| <span class="sd"> >>> df1 = spark.read.json('python/test_support/sql/people.json')</span> |
| <span class="sd"> >>> df1.dtypes</span> |
| <span class="sd"> [('age', 'bigint'), ('name', 'string')]</span> |
| <span class="sd"> >>> rdd = sc.textFile('python/test_support/sql/people.json')</span> |
| <span class="sd"> >>> df2 = spark.read.json(rdd)</span> |
| <span class="sd"> >>> df2.dtypes</span> |
| <span class="sd"> [('age', 'bigint'), ('name', 'string')]</span> |
| |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_set_opts</span><span class="p">(</span> |
| <span class="n">schema</span><span class="o">=</span><span class="n">schema</span><span class="p">,</span> <span class="n">primitivesAsString</span><span class="o">=</span><span class="n">primitivesAsString</span><span class="p">,</span> <span class="n">prefersDecimal</span><span class="o">=</span><span class="n">prefersDecimal</span><span class="p">,</span> |
| <span class="n">allowComments</span><span class="o">=</span><span class="n">allowComments</span><span class="p">,</span> <span class="n">allowUnquotedFieldNames</span><span class="o">=</span><span class="n">allowUnquotedFieldNames</span><span class="p">,</span> |
| <span class="n">allowSingleQuotes</span><span class="o">=</span><span class="n">allowSingleQuotes</span><span class="p">,</span> <span class="n">allowNumericLeadingZero</span><span class="o">=</span><span class="n">allowNumericLeadingZero</span><span class="p">,</span> |
| <span class="n">allowBackslashEscapingAnyCharacter</span><span class="o">=</span><span class="n">allowBackslashEscapingAnyCharacter</span><span class="p">,</span> |
| <span class="n">mode</span><span class="o">=</span><span class="n">mode</span><span class="p">,</span> <span class="n">columnNameOfCorruptRecord</span><span class="o">=</span><span class="n">columnNameOfCorruptRecord</span><span class="p">,</span> <span class="n">dateFormat</span><span class="o">=</span><span class="n">dateFormat</span><span class="p">,</span> |
| <span class="n">timestampFormat</span><span class="o">=</span><span class="n">timestampFormat</span><span class="p">,</span> <span class="n">multiLine</span><span class="o">=</span><span class="n">multiLine</span><span class="p">,</span> |
| <span class="n">allowUnquotedControlChars</span><span class="o">=</span><span class="n">allowUnquotedControlChars</span><span class="p">,</span> <span class="n">lineSep</span><span class="o">=</span><span class="n">lineSep</span><span class="p">,</span> |
| <span class="n">samplingRatio</span><span class="o">=</span><span class="n">samplingRatio</span><span class="p">,</span> <span class="n">dropFieldIfAllNull</span><span class="o">=</span><span class="n">dropFieldIfAllNull</span><span class="p">,</span> <span class="n">encoding</span><span class="o">=</span><span class="n">encoding</span><span class="p">,</span> |
| <span class="n">locale</span><span class="o">=</span><span class="n">locale</span><span class="p">,</span> <span class="n">pathGlobFilter</span><span class="o">=</span><span class="n">pathGlobFilter</span><span class="p">,</span> <span class="n">recursiveFileLookup</span><span class="o">=</span><span class="n">recursiveFileLookup</span><span class="p">,</span> |
| <span class="n">modifiedBefore</span><span class="o">=</span><span class="n">modifiedBefore</span><span class="p">,</span> <span class="n">modifiedAfter</span><span class="o">=</span><span class="n">modifiedAfter</span><span class="p">,</span> |
| <span class="n">allowNonNumericNumbers</span><span class="o">=</span><span class="n">allowNonNumericNumbers</span><span class="p">)</span> |
| <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">path</span><span class="p">,</span> <span class="nb">str</span><span class="p">):</span> |
| <span class="n">path</span> <span class="o">=</span> <span class="p">[</span><span class="n">path</span><span class="p">]</span> |
| <span class="k">if</span> <span class="nb">type</span><span class="p">(</span><span class="n">path</span><span class="p">)</span> <span class="o">==</span> <span class="nb">list</span><span class="p">:</span> |
| <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_df</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span><span class="o">.</span><span class="n">json</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_spark</span><span class="o">.</span><span class="n">_sc</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">PythonUtils</span><span class="o">.</span><span class="n">toSeq</span><span class="p">(</span><span class="n">path</span><span class="p">)))</span> |
| <span class="k">elif</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">path</span><span class="p">,</span> <span class="n">RDD</span><span class="p">):</span> |
| <span class="k">def</span> <span class="nf">func</span><span class="p">(</span><span class="n">iterator</span><span class="p">):</span> |
| <span class="k">for</span> <span class="n">x</span> <span class="ow">in</span> <span class="n">iterator</span><span class="p">:</span> |
| <span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="nb">str</span><span class="p">):</span> |
| <span class="n">x</span> <span class="o">=</span> <span class="nb">str</span><span class="p">(</span><span class="n">x</span><span class="p">)</span> |
| <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="nb">str</span><span class="p">):</span> |
| <span class="n">x</span> <span class="o">=</span> <span class="n">x</span><span class="o">.</span><span class="n">encode</span><span class="p">(</span><span class="s2">"utf-8"</span><span class="p">)</span> |
| <span class="k">yield</span> <span class="n">x</span> |
| <span class="n">keyed</span> <span class="o">=</span> <span class="n">path</span><span class="o">.</span><span class="n">mapPartitions</span><span class="p">(</span><span class="n">func</span><span class="p">)</span> |
| <span class="n">keyed</span><span class="o">.</span><span class="n">_bypass_serializer</span> <span class="o">=</span> <span class="kc">True</span> |
| <span class="n">jrdd</span> <span class="o">=</span> <span class="n">keyed</span><span class="o">.</span><span class="n">_jrdd</span><span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_spark</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">BytesToString</span><span class="p">())</span> |
| <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_df</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span><span class="o">.</span><span class="n">json</span><span class="p">(</span><span class="n">jrdd</span><span class="p">))</span> |
| <span class="k">else</span><span class="p">:</span> |
| <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="s2">"path can be only string, list or RDD"</span><span class="p">)</span></div> |
| |
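    # A sketch of the ``PERMISSIVE`` mode described above, assuming a running
    # ``spark`` session; the path and the ``_corrupt`` column name are
    # hypothetical. A malformed line is kept only because the user-defined
    # schema declares a string field matching ``columnNameOfCorruptRecord``:
    #
    #     df = spark.read.json(
    #         "data/people.json",
    #         schema="age INT, name STRING, _corrupt STRING",
    #         mode="PERMISSIVE",
    #         columnNameOfCorruptRecord="_corrupt")
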
| <div class="viewcode-block" id="DataFrameReader.table"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameReader.table.html#pyspark.sql.DataFrameReader.table">[docs]</a> <span class="k">def</span> <span class="nf">table</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">tableName</span><span class="p">):</span> |
| <span class="sd">"""Returns the specified table as a :class:`DataFrame`.</span> |
| |
| <span class="sd"> .. versionadded:: 1.4.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> tableName : str</span> |
| <span class="sd"> string, name of the table.</span> |
| |
| <span class="sd"> Examples</span> |
| <span class="sd"> --------</span> |
| <span class="sd"> >>> df = spark.read.parquet('python/test_support/sql/parquet_partitioned')</span> |
| <span class="sd"> >>> df.createOrReplaceTempView('tmpTable')</span> |
| <span class="sd"> >>> spark.read.table('tmpTable').dtypes</span> |
| <span class="sd"> [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]</span> |
| <span class="sd"> """</span> |
| <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_df</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span><span class="o">.</span><span class="n">table</span><span class="p">(</span><span class="n">tableName</span><span class="p">))</span></div> |
| |
| <div class="viewcode-block" id="DataFrameReader.parquet"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameReader.parquet.html#pyspark.sql.DataFrameReader.parquet">[docs]</a> <span class="k">def</span> <span class="nf">parquet</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">*</span><span class="n">paths</span><span class="p">,</span> <span class="o">**</span><span class="n">options</span><span class="p">):</span> |
| <span class="sd">"""</span> |
| <span class="sd"> Loads Parquet files, returning the result as a :class:`DataFrame`.</span> |
| |
| <span class="sd"> .. versionadded:: 1.4.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> paths : str</span> |
| |
| <span class="sd"> Other Parameters</span> |
| <span class="sd"> ----------------</span> |
| <span class="sd"> mergeSchema : str or bool, optional</span> |
| <span class="sd"> sets whether we should merge schemas collected from all</span> |
| <span class="sd"> Parquet part-files. This will override</span> |
| <span class="sd"> ``spark.sql.parquet.mergeSchema``. The default value is specified in</span> |
| <span class="sd"> ``spark.sql.parquet.mergeSchema``.</span> |
| <span class="sd"> pathGlobFilter : str or bool, optional</span> |
| <span class="sd"> an optional glob pattern to only include files with paths matching</span> |
| <span class="sd"> the pattern. The syntax follows `org.apache.hadoop.fs.GlobFilter`.</span> |
| <span class="sd"> It does not change the behavior of</span> |
| <span class="sd"> `partition discovery <https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#partition-discovery>`_. # noqa</span> |
| <span class="sd"> recursiveFileLookup : str or bool, optional</span> |
| <span class="sd"> recursively scan a directory for files. Using this option</span> |
| <span class="sd"> disables</span> |
| <span class="sd"> `partition discovery <https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#partition-discovery>`_. # noqa</span> |
| |
| <span class="sd"> modification times occurring before the specified time. The provided timestamp</span> |
| <span class="sd"> must be in the following format: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</span> |
| <span class="sd"> modifiedBefore (batch only) : an optional timestamp to only include files with</span> |
| <span class="sd"> modification times occurring before the specified time. The provided timestamp</span> |
| <span class="sd"> must be in the following format: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</span> |
| <span class="sd"> modifiedAfter (batch only) : an optional timestamp to only include files with</span> |
| <span class="sd"> modification times occurring after the specified time. The provided timestamp</span> |
| <span class="sd"> must be in the following format: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</span> |
| |
| <span class="sd"> Examples</span> |
| <span class="sd"> --------</span> |
| <span class="sd"> >>> df = spark.read.parquet('python/test_support/sql/parquet_partitioned')</span> |
| <span class="sd"> >>> df.dtypes</span> |
| <span class="sd"> [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]</span> |
| <span class="sd"> """</span> |
| <span class="n">mergeSchema</span> <span class="o">=</span> <span class="n">options</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s1">'mergeSchema'</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span> |
| <span class="n">pathGlobFilter</span> <span class="o">=</span> <span class="n">options</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s1">'pathGlobFilter'</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span> |
| <span class="n">modifiedBefore</span> <span class="o">=</span> <span class="n">options</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s1">'modifiedBefore'</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span> |
| <span class="n">modifiedAfter</span> <span class="o">=</span> <span class="n">options</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s1">'modifiedAfter'</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span> |
| <span class="n">recursiveFileLookup</span> <span class="o">=</span> <span class="n">options</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s1">'recursiveFileLookup'</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_set_opts</span><span class="p">(</span><span class="n">mergeSchema</span><span class="o">=</span><span class="n">mergeSchema</span><span class="p">,</span> <span class="n">pathGlobFilter</span><span class="o">=</span><span class="n">pathGlobFilter</span><span class="p">,</span> |
| <span class="n">recursiveFileLookup</span><span class="o">=</span><span class="n">recursiveFileLookup</span><span class="p">,</span> <span class="n">modifiedBefore</span><span class="o">=</span><span class="n">modifiedBefore</span><span class="p">,</span> |
| <span class="n">modifiedAfter</span><span class="o">=</span><span class="n">modifiedAfter</span><span class="p">)</span> |
| |
| <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_df</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span><span class="o">.</span><span class="n">parquet</span><span class="p">(</span><span class="n">_to_seq</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_spark</span><span class="o">.</span><span class="n">_sc</span><span class="p">,</span> <span class="n">paths</span><span class="p">)))</span></div> |
| |
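    # A sketch of passing reader options through ``parquet``'s keyword
    # arguments, with hypothetical partition directories:
    #
    #     df = spark.read.parquet("data/year=2020", "data/year=2021",
    #                             mergeSchema=True)
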
| <span class="k">def</span> <span class="nf">text</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">paths</span><span class="p">,</span> <span class="n">wholetext</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">lineSep</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">pathGlobFilter</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">recursiveFileLookup</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">modifiedBefore</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">modifiedAfter</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span> |
| <span class="sd">"""</span> |
| <span class="sd"> Loads text files and returns a :class:`DataFrame` whose schema starts with a</span> |
| <span class="sd"> string column named "value", and followed by partitioned columns if there</span> |
| <span class="sd"> are any.</span> |
| <span class="sd"> The text files must be encoded as UTF-8.</span> |
| |
| <span class="sd"> By default, each line in the text file is a new row in the resulting DataFrame.</span> |
| |
| <span class="sd"> .. versionadded:: 1.6.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> paths : str or list</span> |
| <span class="sd"> string, or list of strings, for input path(s).</span> |
| <span class="sd"> wholetext : str or bool, optional</span> |
| <span class="sd"> if true, read each file from input path(s) as a single row.</span> |
| <span class="sd"> lineSep : str, optional</span> |
| <span class="sd"> defines the line separator that should be used for parsing. If None is</span> |
| <span class="sd"> set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``.</span> |
| <span class="sd"> pathGlobFilter : str or bool, optional</span> |
| <span class="sd"> an optional glob pattern to only include files with paths matching</span> |
| <span class="sd"> the pattern. The syntax follows `org.apache.hadoop.fs.GlobFilter`.</span> |
| <span class="sd"> It does not change the behavior of</span> |
| <span class="sd"> `partition discovery <https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#partition-discovery>`_. # noqa</span> |
| <span class="sd"> recursiveFileLookup : str or bool, optional</span> |
| <span class="sd"> recursively scan a directory for files. Using this option disables</span> |
| <span class="sd"> `partition discovery <https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#partition-discovery>`_. # noqa</span> |
| |
| <span class="sd"> modification times occurring before the specified time. The provided timestamp</span> |
| <span class="sd"> must be in the following format: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</span> |
| <span class="sd"> modifiedBefore (batch only) : an optional timestamp to only include files with</span> |
| <span class="sd"> modification times occurring before the specified time. The provided timestamp</span> |
| <span class="sd"> must be in the following format: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</span> |
| <span class="sd"> modifiedAfter (batch only) : an optional timestamp to only include files with</span> |
| <span class="sd"> modification times occurring after the specified time. The provided timestamp</span> |
| <span class="sd"> must be in the following format: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</span> |
| |
| <span class="sd"> Examples</span> |
| <span class="sd"> --------</span> |
| <span class="sd"> >>> df = spark.read.text('python/test_support/sql/text-test.txt')</span> |
| <span class="sd"> >>> df.collect()</span> |
| <span class="sd"> [Row(value='hello'), Row(value='this')]</span> |
| <span class="sd"> >>> df = spark.read.text('python/test_support/sql/text-test.txt', wholetext=True)</span> |
| <span class="sd"> >>> df.collect()</span> |
| <span class="sd"> [Row(value='hello\\nthis')]</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_set_opts</span><span class="p">(</span> |
| <span class="n">wholetext</span><span class="o">=</span><span class="n">wholetext</span><span class="p">,</span> <span class="n">lineSep</span><span class="o">=</span><span class="n">lineSep</span><span class="p">,</span> <span class="n">pathGlobFilter</span><span class="o">=</span><span class="n">pathGlobFilter</span><span class="p">,</span> |
| <span class="n">recursiveFileLookup</span><span class="o">=</span><span class="n">recursiveFileLookup</span><span class="p">,</span> <span class="n">modifiedBefore</span><span class="o">=</span><span class="n">modifiedBefore</span><span class="p">,</span> |
| <span class="n">modifiedAfter</span><span class="o">=</span><span class="n">modifiedAfter</span><span class="p">)</span> |
| |
| <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">paths</span><span class="p">,</span> <span class="nb">str</span><span class="p">):</span> |
| <span class="n">paths</span> <span class="o">=</span> <span class="p">[</span><span class="n">paths</span><span class="p">]</span> |
| <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_df</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span><span class="o">.</span><span class="n">text</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_spark</span><span class="o">.</span><span class="n">_sc</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">PythonUtils</span><span class="o">.</span><span class="n">toSeq</span><span class="p">(</span><span class="n">paths</span><span class="p">)))</span> |
| |
| <div class="viewcode-block" id="DataFrameReader.csv"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameReader.csv.html#pyspark.sql.DataFrameReader.csv">[docs]</a> <span class="k">def</span> <span class="nf">csv</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">path</span><span class="p">,</span> <span class="n">schema</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">sep</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">encoding</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">quote</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">escape</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">comment</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">header</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">inferSchema</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">ignoreLeadingWhiteSpace</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">ignoreTrailingWhiteSpace</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">nullValue</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">nanValue</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">positiveInf</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">negativeInf</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">dateFormat</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">timestampFormat</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">maxColumns</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">maxCharsPerColumn</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">maxMalformedLogPerPartition</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">mode</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">columnNameOfCorruptRecord</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">multiLine</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">charToEscapeQuoteEscaping</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">samplingRatio</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">enforceSchema</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">emptyValue</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">locale</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">lineSep</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">pathGlobFilter</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">recursiveFileLookup</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">modifiedBefore</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">modifiedAfter</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">unescapedQuoteHandling</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span> |
| <span class="sa">r</span><span class="sd">"""Loads a CSV file and returns the result as a :class:`DataFrame`.</span> |
| |
| <span class="sd"> This function will go through the input once to determine the input schema if</span> |
| <span class="sd"> ``inferSchema`` is enabled. To avoid going through the entire data once, disable</span> |
| <span class="sd"> ``inferSchema`` option or specify the schema explicitly using ``schema``.</span> |
| |
| <span class="sd"> .. versionadded:: 2.0.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> path : str or list</span> |
| <span class="sd"> string, or list of strings, for input path(s),</span> |
| <span class="sd"> or RDD of Strings storing CSV rows.</span> |
| <span class="sd"> schema : :class:`pyspark.sql.types.StructType` or str, optional</span> |
| <span class="sd"> an optional :class:`pyspark.sql.types.StructType` for the input schema</span> |
| <span class="sd"> or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).</span> |
| <span class="sd"> sep : str, optional</span> |
| <span class="sd"> sets a separator (one or more characters) for each field and value. If None is</span> |
| <span class="sd"> set, it uses the default value, ``,``.</span> |
| <span class="sd"> encoding : str, optional</span> |
| <span class="sd"> decodes the CSV files by the given encoding type. If None is set,</span> |
| <span class="sd"> it uses the default value, ``UTF-8``.</span> |
| <span class="sd"> quote : str, optional</span> |
| <span class="sd"> sets a single character used for escaping quoted values where the</span> |
| <span class="sd"> separator can be part of the value. If None is set, it uses the default</span> |
| <span class="sd"> value, ``"``. If you would like to turn off quotations, you need to set an</span> |
| <span class="sd"> empty string.</span> |
| <span class="sd"> escape : str, optional</span> |
| <span class="sd"> sets a single character used for escaping quotes inside an already</span> |
| <span class="sd"> quoted value. If None is set, it uses the default value, ``\``.</span> |
| <span class="sd"> comment : str, optional</span> |
| <span class="sd"> sets a single character used for skipping lines beginning with this</span> |
| <span class="sd"> character. By default (None), it is disabled.</span> |
| <span class="sd"> header : str or bool, optional</span> |
| <span class="sd"> uses the first line as names of columns. If None is set, it uses the</span> |
| <span class="sd"> default value, ``false``.</span> |
| |
| <span class="sd"> .. note:: if the given path is a RDD of Strings, this header</span> |
| <span class="sd"> option will remove all lines same with the header if exists.</span> |
| |
| <span class="sd"> inferSchema : str or bool, optional</span> |
| <span class="sd"> infers the input schema automatically from data. It requires one extra</span> |
| <span class="sd"> pass over the data. If None is set, it uses the default value, ``false``.</span> |
| <span class="sd"> enforceSchema : str or bool, optional</span> |
| <span class="sd"> If it is set to ``true``, the specified or inferred schema will be</span> |
| <span class="sd"> forcibly applied to datasource files, and headers in CSV files will be</span> |
| <span class="sd"> ignored. If the option is set to ``false``, the schema will be</span> |
| <span class="sd"> validated against all headers in CSV files or the first header in RDD</span> |
| <span class="sd"> if the ``header`` option is set to ``true``. Field names in the schema</span> |
| <span class="sd"> and column names in CSV headers are checked by their positions</span> |
| <span class="sd"> taking into account ``spark.sql.caseSensitive``. If None is set,</span> |
| <span class="sd"> ``true`` is used by default. Though the default value is ``true``,</span> |
| <span class="sd"> it is recommended to disable the ``enforceSchema`` option</span> |
| <span class="sd"> to avoid incorrect results.</span> |
| <span class="sd"> ignoreLeadingWhiteSpace : str or bool, optional</span> |
| <span class="sd"> A flag indicating whether or not leading whitespaces from</span> |
| <span class="sd"> values being read should be skipped. If None is set, it</span> |
| <span class="sd"> uses the default value, ``false``.</span> |
| <span class="sd"> ignoreTrailingWhiteSpace : str or bool, optional</span> |
| <span class="sd"> A flag indicating whether or not trailing whitespaces from</span> |
| <span class="sd"> values being read should be skipped. If None is set, it</span> |
| <span class="sd"> uses the default value, ``false``.</span> |
| <span class="sd"> nullValue : str, optional</span> |
| <span class="sd"> sets the string representation of a null value. If None is set, it uses</span> |
| <span class="sd"> the default value, empty string. Since 2.0.1, this ``nullValue`` param</span> |
| <span class="sd"> applies to all supported types including the string type.</span> |
| <span class="sd"> nanValue : str, optional</span> |
| <span class="sd"> sets the string representation of a non-number value. If None is set, it</span> |
| <span class="sd"> uses the default value, ``NaN``.</span> |
| <span class="sd"> positiveInf : str, optional</span> |
| <span class="sd"> sets the string representation of a positive infinity value. If None</span> |
| <span class="sd"> is set, it uses the default value, ``Inf``.</span> |
| <span class="sd"> negativeInf : str, optional</span> |
| <span class="sd"> sets the string representation of a negative infinity value. If None</span> |
| <span class="sd"> is set, it uses the default value, ``Inf``.</span> |
| <span class="sd"> dateFormat : str, optional</span> |
| <span class="sd"> sets the string that indicates a date format. Custom date formats</span> |
| <span class="sd"> follow the formats at</span> |
| <span class="sd"> `datetime pattern <https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html>`_. # noqa</span> |
| <span class="sd"> This applies to date type. If None is set, it uses the</span> |
| <span class="sd"> default value, ``yyyy-MM-dd``.</span> |
| <span class="sd"> timestampFormat : str, optional</span> |
| <span class="sd"> sets the string that indicates a timestamp format.</span> |
| <span class="sd"> Custom date formats follow the formats at</span> |
| <span class="sd"> `datetime pattern <https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html>`_. # noqa</span> |
| <span class="sd"> This applies to timestamp type. If None is set, it uses the</span> |
| <span class="sd"> default value, ``yyyy-MM-dd'T'HH:mm:ss[.SSS][XXX]``.</span> |
| <span class="sd"> maxColumns : str or int, optional</span> |
| <span class="sd"> defines a hard limit of how many columns a record can have. If None is</span> |
| <span class="sd"> set, it uses the default value, ``20480``.</span> |
| <span class="sd"> maxCharsPerColumn : str or int, optional</span> |
| <span class="sd"> defines the maximum number of characters allowed for any given</span> |
| <span class="sd"> value being read. If None is set, it uses the default value,</span> |
| <span class="sd"> ``-1`` meaning unlimited length.</span> |
| <span class="sd"> maxMalformedLogPerPartition : str or int, optional</span> |
| <span class="sd"> this parameter is no longer used since Spark 2.2.0.</span> |
| <span class="sd"> If specified, it is ignored.</span> |
| <span class="sd"> mode : str, optional</span> |
| <span class="sd"> allows a mode for dealing with corrupt records during parsing. If None is</span> |
| <span class="sd"> set, it uses the default value, ``PERMISSIVE``. Note that Spark tries to</span> |
| <span class="sd"> parse only required columns in CSV under column pruning. Therefore, corrupt</span> |
| <span class="sd"> records can be different based on required set of fields. This behavior can</span> |
| <span class="sd"> be controlled by ``spark.sql.csv.parser.columnPruning.enabled``</span> |
| <span class="sd"> (enabled by default).</span> |
| |
| <span class="sd"> * ``PERMISSIVE``: when it meets a corrupted record, puts the malformed string \</span> |
| <span class="sd"> into a field configured by ``columnNameOfCorruptRecord``, and sets malformed \</span> |
| <span class="sd"> fields to ``null``. To keep corrupt records, an user can set a string type \</span> |
| <span class="sd"> field named ``columnNameOfCorruptRecord`` in an user-defined schema. If a \</span> |
| <span class="sd"> schema does not have the field, it drops corrupt records during parsing. \</span> |
| <span class="sd"> A record with less/more tokens than schema is not a corrupted record to CSV. \</span> |
| <span class="sd"> When it meets a record having fewer tokens than the length of the schema, \</span> |
| <span class="sd"> sets ``null`` to extra fields. When the record has more tokens than the \</span> |
| <span class="sd"> length of the schema, it drops extra tokens.</span> |
| <span class="sd"> * ``DROPMALFORMED``: ignores the whole corrupted records.</span> |
| <span class="sd"> * ``FAILFAST``: throws an exception when it meets corrupted records.</span> |
| |
| <span class="sd"> columnNameOfCorruptRecord : str, optional</span> |
| <span class="sd"> allows renaming the new field having malformed string</span> |
| <span class="sd"> created by ``PERMISSIVE`` mode. This overrides</span> |
| <span class="sd"> ``spark.sql.columnNameOfCorruptRecord``. If None is set,</span> |
| <span class="sd"> it uses the value specified in</span> |
| <span class="sd"> ``spark.sql.columnNameOfCorruptRecord``.</span> |
| <span class="sd"> multiLine : str or bool, optional</span> |
| <span class="sd"> parse records, which may span multiple lines. If None is</span> |
| <span class="sd"> set, it uses the default value, ``false``.</span> |
| <span class="sd"> charToEscapeQuoteEscaping : str, optional</span> |
| <span class="sd"> sets a single character used for escaping the escape for</span> |
| <span class="sd"> the quote character. If None is set, the default value is</span> |
| <span class="sd"> escape character when escape and quote characters are</span> |
| <span class="sd"> different, ``\0`` otherwise.</span> |
| <span class="sd"> samplingRatio : str or float, optional</span> |
| <span class="sd"> defines fraction of rows used for schema inferring.</span> |
| <span class="sd"> If None is set, it uses the default value, ``1.0``.</span> |
| <span class="sd"> emptyValue : str, optional</span> |
| <span class="sd"> sets the string representation of an empty value. If None is set, it uses</span> |
| <span class="sd"> the default value, empty string.</span> |
| <span class="sd"> locale : str, optional</span> |
| <span class="sd"> sets a locale as language tag in IETF BCP 47 format. If None is set,</span> |
| <span class="sd"> it uses the default value, ``en-US``. For instance, ``locale`` is used while</span> |
| <span class="sd"> parsing dates and timestamps.</span> |
| <span class="sd"> lineSep : str, optional</span> |
| <span class="sd"> defines the line separator that should be used for parsing. If None is</span> |
| <span class="sd"> set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``.</span> |
| <span class="sd"> Maximum length is 1 character.</span> |
| <span class="sd"> pathGlobFilter : str or bool, optional</span> |
| <span class="sd"> an optional glob pattern to only include files with paths matching</span> |
| <span class="sd"> the pattern. The syntax follows `org.apache.hadoop.fs.GlobFilter`.</span> |
| <span class="sd"> It does not change the behavior of</span> |
| <span class="sd"> `partition discovery <https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#partition-discovery>`_. # noqa</span> |
| <span class="sd"> recursiveFileLookup : str or bool, optional</span> |
| <span class="sd"> recursively scan a directory for files. Using this option disables</span> |
| <span class="sd"> `partition discovery <https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#partition-discovery>`_. # noqa</span> |
| |
| <span class="sd"> modification times occurring before the specified time. The provided timestamp</span> |
| <span class="sd"> must be in the following format: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</span> |
| <span class="sd"> modifiedBefore (batch only) : an optional timestamp to only include files with</span> |
| <span class="sd"> modification times occurring before the specified time. The provided timestamp</span> |
| <span class="sd"> must be in the following format: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</span> |
| <span class="sd"> modifiedAfter (batch only) : an optional timestamp to only include files with</span> |
| <span class="sd"> modification times occurring after the specified time. The provided timestamp</span> |
| <span class="sd"> must be in the following format: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</span> |
| <span class="sd"> unescapedQuoteHandling : str, optional</span> |
| <span class="sd"> defines how the CsvParser will handle values with unescaped quotes. If None is</span> |
| <span class="sd"> set, it uses the default value, ``STOP_AT_DELIMITER``.</span> |
| |
| <span class="sd"> * ``STOP_AT_CLOSING_QUOTE``: If unescaped quotes are found in the input, accumulate</span> |
| <span class="sd"> the quote character and proceed parsing the value as a quoted value, until a closing</span> |
| <span class="sd"> quote is found.</span> |
| <span class="sd"> * ``BACK_TO_DELIMITER``: If unescaped quotes are found in the input, consider the value</span> |
| <span class="sd"> as an unquoted value. This will make the parser accumulate all characters of the current</span> |
| <span class="sd"> parsed value until the delimiter is found. If no delimiter is found in the value, the</span> |
| <span class="sd"> parser will continue accumulating characters from the input until a delimiter or line</span> |
| <span class="sd"> ending is found.</span> |
| <span class="sd"> * ``STOP_AT_DELIMITER``: If unescaped quotes are found in the input, consider the value</span> |
| <span class="sd"> as an unquoted value. This will make the parser accumulate all characters until the</span> |
| <span class="sd"> delimiter or a line ending is found in the input.</span> |
| <span class="sd"> * ``STOP_AT_DELIMITER``: If unescaped quotes are found in the input, the content parsed</span> |
| <span class="sd"> for the given value will be skipped and the value set in nullValue will be produced</span> |
| <span class="sd"> instead.</span> |
| <span class="sd"> * ``RAISE_ERROR``: If unescaped quotes are found in the input, a TextParsingException</span> |
| <span class="sd"> will be thrown.</span> |
| |
| <span class="sd"> Examples</span> |
| <span class="sd"> --------</span> |
| <span class="sd"> >>> df = spark.read.csv('python/test_support/sql/ages.csv')</span> |
| <span class="sd"> >>> df.dtypes</span> |
| <span class="sd"> [('_c0', 'string'), ('_c1', 'string')]</span> |
| <span class="sd"> >>> rdd = sc.textFile('python/test_support/sql/ages.csv')</span> |
| <span class="sd"> >>> df2 = spark.read.csv(rdd)</span> |
| <span class="sd"> >>> df2.dtypes</span> |
| <span class="sd"> [('_c0', 'string'), ('_c1', 'string')]</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_set_opts</span><span class="p">(</span> |
| <span class="n">schema</span><span class="o">=</span><span class="n">schema</span><span class="p">,</span> <span class="n">sep</span><span class="o">=</span><span class="n">sep</span><span class="p">,</span> <span class="n">encoding</span><span class="o">=</span><span class="n">encoding</span><span class="p">,</span> <span class="n">quote</span><span class="o">=</span><span class="n">quote</span><span class="p">,</span> <span class="n">escape</span><span class="o">=</span><span class="n">escape</span><span class="p">,</span> <span class="n">comment</span><span class="o">=</span><span class="n">comment</span><span class="p">,</span> |
| <span class="n">header</span><span class="o">=</span><span class="n">header</span><span class="p">,</span> <span class="n">inferSchema</span><span class="o">=</span><span class="n">inferSchema</span><span class="p">,</span> <span class="n">ignoreLeadingWhiteSpace</span><span class="o">=</span><span class="n">ignoreLeadingWhiteSpace</span><span class="p">,</span> |
| <span class="n">ignoreTrailingWhiteSpace</span><span class="o">=</span><span class="n">ignoreTrailingWhiteSpace</span><span class="p">,</span> <span class="n">nullValue</span><span class="o">=</span><span class="n">nullValue</span><span class="p">,</span> |
| <span class="n">nanValue</span><span class="o">=</span><span class="n">nanValue</span><span class="p">,</span> <span class="n">positiveInf</span><span class="o">=</span><span class="n">positiveInf</span><span class="p">,</span> <span class="n">negativeInf</span><span class="o">=</span><span class="n">negativeInf</span><span class="p">,</span> |
| <span class="n">dateFormat</span><span class="o">=</span><span class="n">dateFormat</span><span class="p">,</span> <span class="n">timestampFormat</span><span class="o">=</span><span class="n">timestampFormat</span><span class="p">,</span> <span class="n">maxColumns</span><span class="o">=</span><span class="n">maxColumns</span><span class="p">,</span> |
| <span class="n">maxCharsPerColumn</span><span class="o">=</span><span class="n">maxCharsPerColumn</span><span class="p">,</span> |
| <span class="n">maxMalformedLogPerPartition</span><span class="o">=</span><span class="n">maxMalformedLogPerPartition</span><span class="p">,</span> <span class="n">mode</span><span class="o">=</span><span class="n">mode</span><span class="p">,</span> |
| <span class="n">columnNameOfCorruptRecord</span><span class="o">=</span><span class="n">columnNameOfCorruptRecord</span><span class="p">,</span> <span class="n">multiLine</span><span class="o">=</span><span class="n">multiLine</span><span class="p">,</span> |
| <span class="n">charToEscapeQuoteEscaping</span><span class="o">=</span><span class="n">charToEscapeQuoteEscaping</span><span class="p">,</span> <span class="n">samplingRatio</span><span class="o">=</span><span class="n">samplingRatio</span><span class="p">,</span> |
| <span class="n">enforceSchema</span><span class="o">=</span><span class="n">enforceSchema</span><span class="p">,</span> <span class="n">emptyValue</span><span class="o">=</span><span class="n">emptyValue</span><span class="p">,</span> <span class="n">locale</span><span class="o">=</span><span class="n">locale</span><span class="p">,</span> <span class="n">lineSep</span><span class="o">=</span><span class="n">lineSep</span><span class="p">,</span> |
| <span class="n">pathGlobFilter</span><span class="o">=</span><span class="n">pathGlobFilter</span><span class="p">,</span> <span class="n">recursiveFileLookup</span><span class="o">=</span><span class="n">recursiveFileLookup</span><span class="p">,</span> |
| <span class="n">modifiedBefore</span><span class="o">=</span><span class="n">modifiedBefore</span><span class="p">,</span> <span class="n">modifiedAfter</span><span class="o">=</span><span class="n">modifiedAfter</span><span class="p">,</span> |
| <span class="n">unescapedQuoteHandling</span><span class="o">=</span><span class="n">unescapedQuoteHandling</span><span class="p">)</span> |
| <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">path</span><span class="p">,</span> <span class="nb">str</span><span class="p">):</span> |
| <span class="n">path</span> <span class="o">=</span> <span class="p">[</span><span class="n">path</span><span class="p">]</span> |
| <span class="k">if</span> <span class="nb">type</span><span class="p">(</span><span class="n">path</span><span class="p">)</span> <span class="o">==</span> <span class="nb">list</span><span class="p">:</span> |
| <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_df</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span><span class="o">.</span><span class="n">csv</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_spark</span><span class="o">.</span><span class="n">_sc</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">PythonUtils</span><span class="o">.</span><span class="n">toSeq</span><span class="p">(</span><span class="n">path</span><span class="p">)))</span> |
| <span class="k">elif</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">path</span><span class="p">,</span> <span class="n">RDD</span><span class="p">):</span> |
| <span class="k">def</span> <span class="nf">func</span><span class="p">(</span><span class="n">iterator</span><span class="p">):</span> |
| <span class="k">for</span> <span class="n">x</span> <span class="ow">in</span> <span class="n">iterator</span><span class="p">:</span> |
| <span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="nb">str</span><span class="p">):</span> |
| <span class="n">x</span> <span class="o">=</span> <span class="nb">str</span><span class="p">(</span><span class="n">x</span><span class="p">)</span> |
| <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="nb">str</span><span class="p">):</span> |
| <span class="n">x</span> <span class="o">=</span> <span class="n">x</span><span class="o">.</span><span class="n">encode</span><span class="p">(</span><span class="s2">"utf-8"</span><span class="p">)</span> |
| <span class="k">yield</span> <span class="n">x</span> |
| <span class="n">keyed</span> <span class="o">=</span> <span class="n">path</span><span class="o">.</span><span class="n">mapPartitions</span><span class="p">(</span><span class="n">func</span><span class="p">)</span> |
| <span class="n">keyed</span><span class="o">.</span><span class="n">_bypass_serializer</span> <span class="o">=</span> <span class="kc">True</span> |
| <span class="n">jrdd</span> <span class="o">=</span> <span class="n">keyed</span><span class="o">.</span><span class="n">_jrdd</span><span class="o">.</span><span class="n">map</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_spark</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">BytesToString</span><span class="p">())</span> |
| <span class="c1"># see SPARK-22112</span> |
| <span class="c1"># There aren't any jvm api for creating a dataframe from rdd storing csv.</span> |
| <span class="c1"># We can do it through creating a jvm dataset firstly and using the jvm api</span> |
| <span class="c1"># for creating a dataframe from dataset storing csv.</span> |
| <span class="n">jdataset</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_spark</span><span class="o">.</span><span class="n">_ssql_ctx</span><span class="o">.</span><span class="n">createDataset</span><span class="p">(</span> |
| <span class="n">jrdd</span><span class="o">.</span><span class="n">rdd</span><span class="p">(),</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_spark</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">Encoders</span><span class="o">.</span><span class="n">STRING</span><span class="p">())</span> |
| <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_df</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span><span class="o">.</span><span class="n">csv</span><span class="p">(</span><span class="n">jdataset</span><span class="p">))</span> |
| <span class="k">else</span><span class="p">:</span> |
| <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="s2">"path can be only string, list or RDD"</span><span class="p">)</span></div> |
| |
| <div class="viewcode-block" id="DataFrameReader.orc"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameReader.orc.html#pyspark.sql.DataFrameReader.orc">[docs]</a> <span class="k">def</span> <span class="nf">orc</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">path</span><span class="p">,</span> <span class="n">mergeSchema</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">pathGlobFilter</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">recursiveFileLookup</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">modifiedBefore</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">modifiedAfter</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span> |
| <span class="sd">"""Loads ORC files, returning the result as a :class:`DataFrame`.</span> |
| |
| <span class="sd"> .. versionadded:: 1.5.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> path : str or list</span> |
| <span class="sd"> mergeSchema : str or bool, optional</span> |
| <span class="sd"> sets whether we should merge schemas collected from all</span> |
| <span class="sd"> ORC part-files. This will override ``spark.sql.orc.mergeSchema``.</span> |
| <span class="sd"> The default value is specified in ``spark.sql.orc.mergeSchema``.</span> |
| <span class="sd"> pathGlobFilter : str or bool</span> |
| <span class="sd"> an optional glob pattern to only include files with paths matching</span> |
| <span class="sd"> the pattern. The syntax follows `org.apache.hadoop.fs.GlobFilter`.</span> |
| <span class="sd"> It does not change the behavior of</span> |
| <span class="sd"> `partition discovery <https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#partition-discovery>`_. # noqa</span> |
| <span class="sd"> recursiveFileLookup : str or bool</span> |
| <span class="sd"> recursively scan a directory for files. Using this option</span> |
| <span class="sd"> disables</span> |
| <span class="sd"> `partition discovery <https://spark.apache.org/docs/latest/sql-data-sources-parquet.html#partition-discovery>`_. # noqa</span> |
| |
| <span class="sd"> modification times occurring before the specified time. The provided timestamp</span> |
| <span class="sd"> must be in the following format: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</span> |
| <span class="sd"> modifiedBefore : an optional timestamp to only include files with</span> |
| <span class="sd"> modification times occurring before the specified time. The provided timestamp</span> |
| <span class="sd"> must be in the following format: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</span> |
| <span class="sd"> modifiedAfter : an optional timestamp to only include files with</span> |
| <span class="sd"> modification times occurring after the specified time. The provided timestamp</span> |
| <span class="sd"> must be in the following format: YYYY-MM-DDTHH:mm:ss (e.g. 2020-06-01T13:00:00)</span> |
| |
| <span class="sd"> Examples</span> |
| <span class="sd"> --------</span> |
| <span class="sd"> >>> df = spark.read.orc('python/test_support/sql/orc_partitioned')</span> |
| <span class="sd"> >>> df.dtypes</span> |
| <span class="sd"> [('a', 'bigint'), ('b', 'int'), ('c', 'int')]</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_set_opts</span><span class="p">(</span><span class="n">mergeSchema</span><span class="o">=</span><span class="n">mergeSchema</span><span class="p">,</span> <span class="n">pathGlobFilter</span><span class="o">=</span><span class="n">pathGlobFilter</span><span class="p">,</span> |
| <span class="n">modifiedBefore</span><span class="o">=</span><span class="n">modifiedBefore</span><span class="p">,</span> <span class="n">modifiedAfter</span><span class="o">=</span><span class="n">modifiedAfter</span><span class="p">,</span> |
| <span class="n">recursiveFileLookup</span><span class="o">=</span><span class="n">recursiveFileLookup</span><span class="p">)</span> |
| <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">path</span><span class="p">,</span> <span class="nb">str</span><span class="p">):</span> |
| <span class="n">path</span> <span class="o">=</span> <span class="p">[</span><span class="n">path</span><span class="p">]</span> |
| <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_df</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span><span class="o">.</span><span class="n">orc</span><span class="p">(</span><span class="n">_to_seq</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_spark</span><span class="o">.</span><span class="n">_sc</span><span class="p">,</span> <span class="n">path</span><span class="p">)))</span></div> |
| |
| <div class="viewcode-block" id="DataFrameReader.jdbc"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameReader.jdbc.html#pyspark.sql.DataFrameReader.jdbc">[docs]</a> <span class="k">def</span> <span class="nf">jdbc</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">url</span><span class="p">,</span> <span class="n">table</span><span class="p">,</span> <span class="n">column</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">lowerBound</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">upperBound</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">numPartitions</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">predicates</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">properties</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span> |
| <span class="sd">"""</span> |
| <span class="sd"> Construct a :class:`DataFrame` representing the database table named ``table``</span> |
| <span class="sd"> accessible via JDBC URL ``url`` and connection ``properties``.</span> |
| |
| <span class="sd"> Partitions of the table will be retrieved in parallel if either ``column`` or</span> |
| <span class="sd"> ``predicates`` is specified. ``lowerBound``, ``upperBound`` and ``numPartitions``</span> |
| <span class="sd"> is needed when ``column`` is specified.</span> |
| |
| <span class="sd"> If both ``column`` and ``predicates`` are specified, ``column`` will be used.</span> |
| |
| <span class="sd"> .. versionadded:: 1.4.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> url : str</span> |
| <span class="sd"> a JDBC URL of the form ``jdbc:subprotocol:subname``</span> |
| <span class="sd"> table : str</span> |
| <span class="sd"> the name of the table</span> |
| <span class="sd"> column : str, optional</span> |
| <span class="sd"> the name of a column of numeric, date, or timestamp type</span> |
| <span class="sd"> that will be used for partitioning;</span> |
| <span class="sd"> if this parameter is specified, then ``numPartitions``, ``lowerBound``</span> |
| <span class="sd"> (inclusive), and ``upperBound`` (exclusive) will form partition strides</span> |
| <span class="sd"> for generated WHERE clause expressions used to split the column</span> |
| <span class="sd"> ``column`` evenly</span> |
| <span class="sd"> lowerBound : str or int, optional</span> |
| <span class="sd"> the minimum value of ``column`` used to decide partition stride</span> |
| <span class="sd"> upperBound : str or int, optional</span> |
| <span class="sd"> the maximum value of ``column`` used to decide partition stride</span> |
| <span class="sd"> numPartitions : int, optional</span> |
| <span class="sd"> the number of partitions</span> |
| <span class="sd"> predicates : list, optional</span> |
| <span class="sd"> a list of expressions suitable for inclusion in WHERE clauses;</span> |
| <span class="sd"> each one defines one partition of the :class:`DataFrame`</span> |
| <span class="sd"> properties : dict, optional</span> |
| <span class="sd"> a dictionary of JDBC database connection arguments. Normally at</span> |
| <span class="sd"> least properties "user" and "password" with their corresponding values.</span> |
| <span class="sd"> For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }</span> |
| |
| <span class="sd"> Notes</span> |
| <span class="sd"> -----</span> |
| <span class="sd"> Don't create too many partitions in parallel on a large cluster;</span> |
| <span class="sd"> otherwise Spark might crash your external database systems.</span> |
| |
| <span class="sd"> Returns</span> |
| <span class="sd"> -------</span> |
| <span class="sd"> :class:`DataFrame`</span> |
| <span class="sd"> """</span> |
| <span class="k">if</span> <span class="n">properties</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span> |
| <span class="n">properties</span> <span class="o">=</span> <span class="nb">dict</span><span class="p">()</span> |
| <span class="n">jprop</span> <span class="o">=</span> <span class="n">JavaClass</span><span class="p">(</span><span class="s2">"java.util.Properties"</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">_spark</span><span class="o">.</span><span class="n">_sc</span><span class="o">.</span><span class="n">_gateway</span><span class="o">.</span><span class="n">_gateway_client</span><span class="p">)()</span> |
| <span class="k">for</span> <span class="n">k</span> <span class="ow">in</span> <span class="n">properties</span><span class="p">:</span> |
| <span class="n">jprop</span><span class="o">.</span><span class="n">setProperty</span><span class="p">(</span><span class="n">k</span><span class="p">,</span> <span class="n">properties</span><span class="p">[</span><span class="n">k</span><span class="p">])</span> |
| <span class="k">if</span> <span class="n">column</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span> |
| <span class="k">assert</span> <span class="n">lowerBound</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">,</span> <span class="s2">"lowerBound can not be None when ``column`` is specified"</span> |
| <span class="k">assert</span> <span class="n">upperBound</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">,</span> <span class="s2">"upperBound can not be None when ``column`` is specified"</span> |
| <span class="k">assert</span> <span class="n">numPartitions</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">,</span> \ |
| <span class="s2">"numPartitions can not be None when ``column`` is specified"</span> |
| <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_df</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span><span class="o">.</span><span class="n">jdbc</span><span class="p">(</span><span class="n">url</span><span class="p">,</span> <span class="n">table</span><span class="p">,</span> <span class="n">column</span><span class="p">,</span> <span class="nb">int</span><span class="p">(</span><span class="n">lowerBound</span><span class="p">),</span> <span class="nb">int</span><span class="p">(</span><span class="n">upperBound</span><span class="p">),</span> |
| <span class="nb">int</span><span class="p">(</span><span class="n">numPartitions</span><span class="p">),</span> <span class="n">jprop</span><span class="p">))</span> |
| <span class="k">if</span> <span class="n">predicates</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span> |
| <span class="n">gateway</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_spark</span><span class="o">.</span><span class="n">_sc</span><span class="o">.</span><span class="n">_gateway</span> |
| <span class="n">jpredicates</span> <span class="o">=</span> <span class="n">utils</span><span class="o">.</span><span class="n">toJArray</span><span class="p">(</span><span class="n">gateway</span><span class="p">,</span> <span class="n">gateway</span><span class="o">.</span><span class="n">jvm</span><span class="o">.</span><span class="n">java</span><span class="o">.</span><span class="n">lang</span><span class="o">.</span><span class="n">String</span><span class="p">,</span> <span class="n">predicates</span><span class="p">)</span> |
| <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_df</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span><span class="o">.</span><span class="n">jdbc</span><span class="p">(</span><span class="n">url</span><span class="p">,</span> <span class="n">table</span><span class="p">,</span> <span class="n">jpredicates</span><span class="p">,</span> <span class="n">jprop</span><span class="p">))</span> |
| <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_df</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jreader</span><span class="o">.</span><span class="n">jdbc</span><span class="p">(</span><span class="n">url</span><span class="p">,</span> <span class="n">table</span><span class="p">,</span> <span class="n">jprop</span><span class="p">))</span></div> |
| |
| |
| <span class="k">class</span> <span class="nc">DataFrameWriter</span><span class="p">(</span><span class="n">OptionUtils</span><span class="p">):</span> |
| <span class="sd">"""</span> |
| <span class="sd"> Interface used to write a :class:`DataFrame` to external storage systems</span> |
| <span class="sd"> (e.g. file systems, key-value stores, etc). Use :attr:`DataFrame.write`</span> |
| <span class="sd"> to access this.</span> |
| |
| <span class="sd"> .. versionadded:: 1.4</span> |
| <span class="sd"> """</span> |
| <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">df</span><span class="p">):</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_df</span> <span class="o">=</span> <span class="n">df</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_spark</span> <span class="o">=</span> <span class="n">df</span><span class="o">.</span><span class="n">sql_ctx</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span> <span class="o">=</span> <span class="n">df</span><span class="o">.</span><span class="n">_jdf</span><span class="o">.</span><span class="n">write</span><span class="p">()</span> |
| |
| <span class="k">def</span> <span class="nf">_sq</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">jsq</span><span class="p">):</span> |
| <span class="kn">from</span> <span class="nn">pyspark.sql.streaming</span> <span class="kn">import</span> <span class="n">StreamingQuery</span> |
| <span class="k">return</span> <span class="n">StreamingQuery</span><span class="p">(</span><span class="n">jsq</span><span class="p">)</span> |
| |
| <div class="viewcode-block" id="DataFrameWriter.mode"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameWriter.mode.html#pyspark.sql.DataFrameWriter.mode">[docs]</a> <span class="k">def</span> <span class="nf">mode</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">saveMode</span><span class="p">):</span> |
| <span class="sd">"""Specifies the behavior when data or table already exists.</span> |
| |
| <span class="sd"> Options include:</span> |
| |
| <span class="sd"> * `append`: Append contents of this :class:`DataFrame` to existing data.</span> |
| <span class="sd"> * `overwrite`: Overwrite existing data.</span> |
| <span class="sd"> * `error` or `errorifexists`: Throw an exception if data already exists.</span> |
| <span class="sd"> * `ignore`: Silently ignore this operation if data already exists.</span> |
| |
| <span class="sd"> .. versionadded:: 1.4.0</span> |
| |
| <span class="sd"> Examples</span> |
| <span class="sd"> --------</span> |
| <span class="sd"> >>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))</span> |
| <span class="sd"> """</span> |
| <span class="c1"># At the JVM side, the default value of mode is already set to "error".</span> |
| <span class="c1"># So, if the given saveMode is None, we will not call JVM-side's mode method.</span> |
| <span class="k">if</span> <span class="n">saveMode</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span><span class="o">.</span><span class="n">mode</span><span class="p">(</span><span class="n">saveMode</span><span class="p">)</span> |
| <span class="k">return</span> <span class="bp">self</span></div> |
| |
| <div class="viewcode-block" id="DataFrameWriter.format"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameWriter.format.html#pyspark.sql.DataFrameWriter.format">[docs]</a> <span class="k">def</span> <span class="nf">format</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">source</span><span class="p">):</span> |
| <span class="sd">"""Specifies the underlying output data source.</span> |
| |
| <span class="sd"> .. versionadded:: 1.4.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> source : str</span> |
| <span class="sd"> string, name of the data source, e.g. 'json', 'parquet'.</span> |
| |
| <span class="sd"> Examples</span> |
| <span class="sd"> --------</span> |
| <span class="sd"> >>> df.write.format('json').save(os.path.join(tempfile.mkdtemp(), 'data'))</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">source</span><span class="p">)</span> |
| <span class="k">return</span> <span class="bp">self</span></div> |
| |
| <div class="viewcode-block" id="DataFrameWriter.option"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameWriter.option.html#pyspark.sql.DataFrameWriter.option">[docs]</a> <span class="nd">@since</span><span class="p">(</span><span class="mf">1.5</span><span class="p">)</span> |
| <span class="k">def</span> <span class="nf">option</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">key</span><span class="p">,</span> <span class="n">value</span><span class="p">):</span> |
| <span class="sd">"""Adds an output option for the underlying data source.</span> |
| |
| <span class="sd"> You can set the following option(s) for writing files:</span> |
| <span class="sd"> * ``timeZone``: sets the string that indicates a time zone ID to be used to format</span> |
| <span class="sd"> timestamps in the JSON/CSV datasources or partition values. The following</span> |
| <span class="sd"> formats of `timeZone` are supported:</span> |
| |
| <span class="sd"> * Region-based zone ID: It should have the form 'area/city', such as \</span> |
| <span class="sd"> 'America/Los_Angeles'.</span> |
| <span class="sd"> * Zone offset: It should be in the format '(+|-)HH:mm', for example '-08:00' or \</span> |
| <span class="sd"> '+01:00'. Also 'UTC' and 'Z' are supported as aliases of '+00:00'.</span> |
| |
| <span class="sd"> Other short names like 'CST' are not recommended to use because they can be</span> |
| <span class="sd"> ambiguous. If it isn't set, the current value of the SQL config</span> |
| <span class="sd"> ``spark.sql.session.timeZone`` is used by default.</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span><span class="o">.</span><span class="n">option</span><span class="p">(</span><span class="n">key</span><span class="p">,</span> <span class="n">to_str</span><span class="p">(</span><span class="n">value</span><span class="p">))</span> |
| <span class="k">return</span> <span class="bp">self</span></div> |
| |
| <div class="viewcode-block" id="DataFrameWriter.options"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameWriter.options.html#pyspark.sql.DataFrameWriter.options">[docs]</a> <span class="nd">@since</span><span class="p">(</span><span class="mf">1.4</span><span class="p">)</span> |
| <span class="k">def</span> <span class="nf">options</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">**</span><span class="n">options</span><span class="p">):</span> |
| <span class="sd">"""Adds output options for the underlying data source.</span> |
| |
| <span class="sd"> You can set the following option(s) for writing files:</span> |
| <span class="sd"> * ``timeZone``: sets the string that indicates a time zone ID to be used to format</span> |
| <span class="sd"> timestamps in the JSON/CSV datasources or partition values. The following</span> |
| <span class="sd"> formats of `timeZone` are supported:</span> |
| |
| <span class="sd"> * Region-based zone ID: It should have the form 'area/city', such as \</span> |
| <span class="sd"> 'America/Los_Angeles'.</span> |
| <span class="sd"> * Zone offset: It should be in the format '(+|-)HH:mm', for example '-08:00' or \</span> |
| <span class="sd"> '+01:00'. Also 'UTC' and 'Z' are supported as aliases of '+00:00'.</span> |
| |
| <span class="sd"> Other short names like 'CST' are not recommended to use because they can be</span> |
| <span class="sd"> ambiguous. If it isn't set, the current value of the SQL config</span> |
| <span class="sd"> ``spark.sql.session.timeZone`` is used by default.</span> |
| <span class="sd"> """</span> |
| <span class="k">for</span> <span class="n">k</span> <span class="ow">in</span> <span class="n">options</span><span class="p">:</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span><span class="o">.</span><span class="n">option</span><span class="p">(</span><span class="n">k</span><span class="p">,</span> <span class="n">to_str</span><span class="p">(</span><span class="n">options</span><span class="p">[</span><span class="n">k</span><span class="p">]))</span> |
| <span class="k">return</span> <span class="bp">self</span></div> |
| |
| <div class="viewcode-block" id="DataFrameWriter.partitionBy"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameWriter.partitionBy.html#pyspark.sql.DataFrameWriter.partitionBy">[docs]</a> <span class="k">def</span> <span class="nf">partitionBy</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">*</span><span class="n">cols</span><span class="p">):</span> |
| <span class="sd">"""Partitions the output by the given columns on the file system.</span> |
| |
| <span class="sd"> If specified, the output is laid out on the file system similar</span> |
| <span class="sd"> to Hive's partitioning scheme.</span> |
| |
| <span class="sd"> .. versionadded:: 1.4.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> cols : str or list</span> |
| <span class="sd"> name of columns</span> |
| |
| <span class="sd"> Examples</span> |
| <span class="sd"> --------</span> |
| <span class="sd"> >>> df.write.partitionBy('year', 'month').parquet(os.path.join(tempfile.mkdtemp(), 'data'))</span> |
| <span class="sd"> """</span> |
| <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">cols</span><span class="p">)</span> <span class="o">==</span> <span class="mi">1</span> <span class="ow">and</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">cols</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="p">(</span><span class="nb">list</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">)):</span> |
| <span class="n">cols</span> <span class="o">=</span> <span class="n">cols</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span><span class="o">.</span><span class="n">partitionBy</span><span class="p">(</span><span class="n">_to_seq</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_spark</span><span class="o">.</span><span class="n">_sc</span><span class="p">,</span> <span class="n">cols</span><span class="p">))</span> |
| <span class="k">return</span> <span class="bp">self</span></div> |
| |
| <div class="viewcode-block" id="DataFrameWriter.bucketBy"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameWriter.bucketBy.html#pyspark.sql.DataFrameWriter.bucketBy">[docs]</a> <span class="k">def</span> <span class="nf">bucketBy</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">numBuckets</span><span class="p">,</span> <span class="n">col</span><span class="p">,</span> <span class="o">*</span><span class="n">cols</span><span class="p">):</span> |
| <span class="sd">"""Buckets the output by the given columns.If specified,</span> |
| <span class="sd"> the output is laid out on the file system similar to Hive's bucketing scheme.</span> |
| |
| <span class="sd"> .. versionadded:: 2.3.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> numBuckets : int</span> |
| <span class="sd"> the number of buckets to save</span> |
| <span class="sd"> col : str, list or tuple</span> |
| <span class="sd"> a name of a column, or a list of names.</span> |
| <span class="sd"> cols : str</span> |
| <span class="sd"> additional names (optional). If `col` is a list it should be empty.</span> |
| |
| <span class="sd"> Notes</span> |
| <span class="sd"> -----</span> |
| <span class="sd"> Applicable for file-based data sources in combination with</span> |
| <span class="sd"> :py:meth:`DataFrameWriter.saveAsTable`.</span> |
| |
| <span class="sd"> Examples</span> |
| <span class="sd"> --------</span> |
| <span class="sd"> >>> (df.write.format('parquet') # doctest: +SKIP</span> |
| <span class="sd"> ... .bucketBy(100, 'year', 'month')</span> |
| <span class="sd"> ... .mode("overwrite")</span> |
| <span class="sd"> ... .saveAsTable('bucketed_table'))</span> |
| <span class="sd"> """</span> |
| <span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">numBuckets</span><span class="p">,</span> <span class="nb">int</span><span class="p">):</span> |
| <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="s2">"numBuckets should be an int, got </span><span class="si">{0}</span><span class="s2">."</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">numBuckets</span><span class="p">)))</span> |
| |
| <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">col</span><span class="p">,</span> <span class="p">(</span><span class="nb">list</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">)):</span> |
| <span class="k">if</span> <span class="n">cols</span><span class="p">:</span> |
| <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s2">"col is a </span><span class="si">{0}</span><span class="s2"> but cols are not empty"</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">col</span><span class="p">)))</span> |
| |
| <span class="n">col</span><span class="p">,</span> <span class="n">cols</span> <span class="o">=</span> <span class="n">col</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">col</span><span class="p">[</span><span class="mi">1</span><span class="p">:]</span> |
| |
| <span class="k">if</span> <span class="ow">not</span> <span class="nb">all</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">c</span><span class="p">,</span> <span class="nb">str</span><span class="p">)</span> <span class="k">for</span> <span class="n">c</span> <span class="ow">in</span> <span class="n">cols</span><span class="p">)</span> <span class="ow">or</span> <span class="ow">not</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">col</span><span class="p">,</span> <span class="nb">str</span><span class="p">)):</span> |
| <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="s2">"all names should be `str`"</span><span class="p">)</span> |
| |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span><span class="o">.</span><span class="n">bucketBy</span><span class="p">(</span><span class="n">numBuckets</span><span class="p">,</span> <span class="n">col</span><span class="p">,</span> <span class="n">_to_seq</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_spark</span><span class="o">.</span><span class="n">_sc</span><span class="p">,</span> <span class="n">cols</span><span class="p">))</span> |
| <span class="k">return</span> <span class="bp">self</span></div> |
| |
| <div class="viewcode-block" id="DataFrameWriter.sortBy"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameWriter.sortBy.html#pyspark.sql.DataFrameWriter.sortBy">[docs]</a> <span class="k">def</span> <span class="nf">sortBy</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">col</span><span class="p">,</span> <span class="o">*</span><span class="n">cols</span><span class="p">):</span> |
| <span class="sd">"""Sorts the output in each bucket by the given columns on the file system.</span> |
| |
| <span class="sd"> .. versionadded:: 2.3.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> col : str, tuple or list</span> |
| <span class="sd"> a name of a column, or a list of names.</span> |
| <span class="sd"> cols : str</span> |
| <span class="sd"> additional names (optional). If `col` is a list it should be empty.</span> |
| |
| <span class="sd"> Examples</span> |
| <span class="sd"> --------</span> |
| <span class="sd"> >>> (df.write.format('parquet') # doctest: +SKIP</span> |
| <span class="sd"> ... .bucketBy(100, 'year', 'month')</span> |
| <span class="sd"> ... .sortBy('day')</span> |
| <span class="sd"> ... .mode("overwrite")</span> |
| <span class="sd"> ... .saveAsTable('sorted_bucketed_table'))</span> |
| <span class="sd"> """</span> |
| <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">col</span><span class="p">,</span> <span class="p">(</span><span class="nb">list</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">)):</span> |
| <span class="k">if</span> <span class="n">cols</span><span class="p">:</span> |
| <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s2">"col is a </span><span class="si">{0}</span><span class="s2"> but cols are not empty"</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">col</span><span class="p">)))</span> |
| |
| <span class="n">col</span><span class="p">,</span> <span class="n">cols</span> <span class="o">=</span> <span class="n">col</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">col</span><span class="p">[</span><span class="mi">1</span><span class="p">:]</span> |
| |
| <span class="k">if</span> <span class="ow">not</span> <span class="nb">all</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">c</span><span class="p">,</span> <span class="nb">str</span><span class="p">)</span> <span class="k">for</span> <span class="n">c</span> <span class="ow">in</span> <span class="n">cols</span><span class="p">)</span> <span class="ow">or</span> <span class="ow">not</span><span class="p">(</span><span class="nb">isinstance</span><span class="p">(</span><span class="n">col</span><span class="p">,</span> <span class="nb">str</span><span class="p">)):</span> |
| <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="s2">"all names should be `str`"</span><span class="p">)</span> |
| |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span><span class="o">.</span><span class="n">sortBy</span><span class="p">(</span><span class="n">col</span><span class="p">,</span> <span class="n">_to_seq</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_spark</span><span class="o">.</span><span class="n">_sc</span><span class="p">,</span> <span class="n">cols</span><span class="p">))</span> |
| <span class="k">return</span> <span class="bp">self</span></div> |
| |
| <div class="viewcode-block" id="DataFrameWriter.save"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameWriter.save.html#pyspark.sql.DataFrameWriter.save">[docs]</a> <span class="k">def</span> <span class="nf">save</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">path</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="nb">format</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">mode</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">partitionBy</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="o">**</span><span class="n">options</span><span class="p">):</span> |
| <span class="sd">"""Saves the contents of the :class:`DataFrame` to a data source.</span> |
| |
| <span class="sd"> The data source is specified by the ``format`` and a set of ``options``.</span> |
| <span class="sd"> If ``format`` is not specified, the default data source configured by</span> |
| <span class="sd"> ``spark.sql.sources.default`` will be used.</span> |
| |
| <span class="sd"> .. versionadded:: 1.4.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> path : str, optional</span> |
| <span class="sd"> the path in a Hadoop supported file system</span> |
| <span class="sd"> format : str, optional</span> |
| <span class="sd"> the format used to save</span> |
| <span class="sd"> mode : str, optional</span> |
| <span class="sd"> specifies the behavior of the save operation when data already exists.</span> |
| |
| <span class="sd"> * ``append``: Append contents of this :class:`DataFrame` to existing data.</span> |
| <span class="sd"> * ``overwrite``: Overwrite existing data.</span> |
| <span class="sd"> * ``ignore``: Silently ignore this operation if data already exists.</span> |
| <span class="sd"> * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \</span> |
| <span class="sd"> exists.</span> |
| <span class="sd"> partitionBy : list, optional</span> |
| <span class="sd"> names of partitioning columns</span> |
| <span class="sd"> **options : dict</span> |
| <span class="sd"> all other string options</span> |
| |
| <span class="sd"> Examples</span> |
| <span class="sd"> --------</span> |
| <span class="sd"> >>> df.write.mode("append").save(os.path.join(tempfile.mkdtemp(), 'data'))</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">mode</span><span class="p">(</span><span class="n">mode</span><span class="p">)</span><span class="o">.</span><span class="n">options</span><span class="p">(</span><span class="o">**</span><span class="n">options</span><span class="p">)</span> |
| <span class="k">if</span> <span class="n">partitionBy</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">partitionBy</span><span class="p">(</span><span class="n">partitionBy</span><span class="p">)</span> |
| <span class="k">if</span> <span class="nb">format</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="nb">format</span><span class="p">)</span> |
| <span class="k">if</span> <span class="n">path</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span><span class="o">.</span><span class="n">save</span><span class="p">()</span> |
| <span class="k">else</span><span class="p">:</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span><span class="o">.</span><span class="n">save</span><span class="p">(</span><span class="n">path</span><span class="p">)</span></div> |
| |
| <div class="viewcode-block" id="DataFrameWriter.insertInto"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameWriter.insertInto.html#pyspark.sql.DataFrameWriter.insertInto">[docs]</a> <span class="nd">@since</span><span class="p">(</span><span class="mf">1.4</span><span class="p">)</span> |
| <span class="k">def</span> <span class="nf">insertInto</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">tableName</span><span class="p">,</span> <span class="n">overwrite</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span> |
| <span class="sd">"""Inserts the content of the :class:`DataFrame` to the specified table.</span> |
| |
| <span class="sd"> It requires that the schema of the :class:`DataFrame` is the same as the</span> |
| <span class="sd"> schema of the table.</span> |
| |
| <span class="sd"> Optionally overwriting any existing data.</span> |
| <span class="sd"> """</span> |
| <span class="k">if</span> <span class="n">overwrite</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">mode</span><span class="p">(</span><span class="s2">"overwrite"</span> <span class="k">if</span> <span class="n">overwrite</span> <span class="k">else</span> <span class="s2">"append"</span><span class="p">)</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span><span class="o">.</span><span class="n">insertInto</span><span class="p">(</span><span class="n">tableName</span><span class="p">)</span></div> |
| |
| <div class="viewcode-block" id="DataFrameWriter.saveAsTable"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameWriter.saveAsTable.html#pyspark.sql.DataFrameWriter.saveAsTable">[docs]</a> <span class="k">def</span> <span class="nf">saveAsTable</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">name</span><span class="p">,</span> <span class="nb">format</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">mode</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">partitionBy</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="o">**</span><span class="n">options</span><span class="p">):</span> |
| <span class="sd">"""Saves the content of the :class:`DataFrame` as the specified table.</span> |
| |
| <span class="sd"> In the case the table already exists, behavior of this function depends on the</span> |
| <span class="sd"> save mode, specified by the `mode` function (default to throwing an exception).</span> |
| <span class="sd"> When `mode` is `Overwrite`, the schema of the :class:`DataFrame` does not need to be</span> |
| <span class="sd"> the same as that of the existing table.</span> |
| |
| <span class="sd"> * `append`: Append contents of this :class:`DataFrame` to existing data.</span> |
| <span class="sd"> * `overwrite`: Overwrite existing data.</span> |
| <span class="sd"> * `error` or `errorifexists`: Throw an exception if data already exists.</span> |
| <span class="sd"> * `ignore`: Silently ignore this operation if data already exists.</span> |
| |
| <span class="sd"> .. versionadded:: 1.4.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> name : str</span> |
| <span class="sd"> the table name</span> |
| <span class="sd"> format : str, optional</span> |
| <span class="sd"> the format used to save</span> |
| <span class="sd"> mode : str, optional</span> |
| <span class="sd"> one of `append`, `overwrite`, `error`, `errorifexists`, `ignore` \</span> |
| <span class="sd"> (default: error)</span> |
| <span class="sd"> partitionBy : str or list</span> |
| <span class="sd"> names of partitioning columns</span> |
| <span class="sd"> **options : dict</span> |
| <span class="sd"> all other string options</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">mode</span><span class="p">(</span><span class="n">mode</span><span class="p">)</span><span class="o">.</span><span class="n">options</span><span class="p">(</span><span class="o">**</span><span class="n">options</span><span class="p">)</span> |
| <span class="k">if</span> <span class="n">partitionBy</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">partitionBy</span><span class="p">(</span><span class="n">partitionBy</span><span class="p">)</span> |
| <span class="k">if</span> <span class="nb">format</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="nb">format</span><span class="p">)</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span><span class="o">.</span><span class="n">saveAsTable</span><span class="p">(</span><span class="n">name</span><span class="p">)</span></div> |
| |
| <div class="viewcode-block" id="DataFrameWriter.json"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameWriter.json.html#pyspark.sql.DataFrameWriter.json">[docs]</a> <span class="k">def</span> <span class="nf">json</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">path</span><span class="p">,</span> <span class="n">mode</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">compression</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">dateFormat</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">timestampFormat</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">lineSep</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">encoding</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">ignoreNullFields</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span> |
| <span class="sd">"""Saves the content of the :class:`DataFrame` in JSON format</span> |
| <span class="sd"> (`JSON Lines text format or newline-delimited JSON <http://jsonlines.org/>`_) at the</span> |
| <span class="sd"> specified path.</span> |
| |
| <span class="sd"> .. versionadded:: 1.4.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> path : str</span> |
| <span class="sd"> the path in any Hadoop supported file system</span> |
| <span class="sd"> mode : str, optional</span> |
| <span class="sd"> specifies the behavior of the save operation when data already exists.</span> |
| |
| <span class="sd"> * ``append``: Append contents of this :class:`DataFrame` to existing data.</span> |
| <span class="sd"> * ``overwrite``: Overwrite existing data.</span> |
| <span class="sd"> * ``ignore``: Silently ignore this operation if data already exists.</span> |
| <span class="sd"> * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \</span> |
| <span class="sd"> exists.</span> |
| <span class="sd"> compression : str, optional</span> |
| <span class="sd"> compression codec to use when saving to file. This can be one of the</span> |
| <span class="sd"> known case-insensitive shorten names (none, bzip2, gzip, lz4,</span> |
| <span class="sd"> snappy and deflate).</span> |
| <span class="sd"> dateFormat : str, optional</span> |
| <span class="sd"> sets the string that indicates a date format. Custom date formats</span> |
| <span class="sd"> follow the formats at</span> |
| <span class="sd"> `datetime pattern <https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html>`_. # noqa</span> |
| <span class="sd"> This applies to date type. If None is set, it uses the</span> |
| <span class="sd"> default value, ``yyyy-MM-dd``.</span> |
| <span class="sd"> timestampFormat : str, optional</span> |
| <span class="sd"> sets the string that indicates a timestamp format.</span> |
| <span class="sd"> Custom date formats follow the formats at</span> |
| <span class="sd"> `datetime pattern <https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html>`_. # noqa</span> |
| <span class="sd"> This applies to timestamp type. If None is set, it uses the</span> |
| <span class="sd"> default value, ``yyyy-MM-dd'T'HH:mm:ss[.SSS][XXX]``.</span> |
| <span class="sd"> encoding : str, optional</span> |
| <span class="sd"> specifies encoding (charset) of saved json files. If None is set,</span> |
| <span class="sd"> the default UTF-8 charset will be used.</span> |
| <span class="sd"> lineSep : str, optional defines the line separator that should be used for writing. If None is</span> |
| <span class="sd"> set, it uses the default value, ``\\n``.</span> |
| <span class="sd"> ignoreNullFields : str or bool, optional</span> |
| <span class="sd"> Whether to ignore null fields when generating JSON objects.</span> |
| <span class="sd"> If None is set, it uses the default value, ``true``.</span> |
| |
| <span class="sd"> Examples</span> |
| <span class="sd"> --------</span> |
| <span class="sd"> >>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data'))</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">mode</span><span class="p">(</span><span class="n">mode</span><span class="p">)</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_set_opts</span><span class="p">(</span> |
| <span class="n">compression</span><span class="o">=</span><span class="n">compression</span><span class="p">,</span> <span class="n">dateFormat</span><span class="o">=</span><span class="n">dateFormat</span><span class="p">,</span> <span class="n">timestampFormat</span><span class="o">=</span><span class="n">timestampFormat</span><span class="p">,</span> |
| <span class="n">lineSep</span><span class="o">=</span><span class="n">lineSep</span><span class="p">,</span> <span class="n">encoding</span><span class="o">=</span><span class="n">encoding</span><span class="p">,</span> <span class="n">ignoreNullFields</span><span class="o">=</span><span class="n">ignoreNullFields</span><span class="p">)</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span><span class="o">.</span><span class="n">json</span><span class="p">(</span><span class="n">path</span><span class="p">)</span></div> |
| |
| <div class="viewcode-block" id="DataFrameWriter.parquet"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameWriter.parquet.html#pyspark.sql.DataFrameWriter.parquet">[docs]</a> <span class="k">def</span> <span class="nf">parquet</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">path</span><span class="p">,</span> <span class="n">mode</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">partitionBy</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">compression</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span> |
| <span class="sd">"""Saves the content of the :class:`DataFrame` in Parquet format at the specified path.</span> |
| |
| <span class="sd"> .. versionadded:: 1.4.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> path : str</span> |
| <span class="sd"> the path in any Hadoop supported file system</span> |
| <span class="sd"> mode : str, optional</span> |
| <span class="sd"> specifies the behavior of the save operation when data already exists.</span> |
| |
| <span class="sd"> * ``append``: Append contents of this :class:`DataFrame` to existing data.</span> |
| <span class="sd"> * ``overwrite``: Overwrite existing data.</span> |
| <span class="sd"> * ``ignore``: Silently ignore this operation if data already exists.</span> |
| <span class="sd"> * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \</span> |
| <span class="sd"> exists.</span> |
| <span class="sd"> partitionBy : str or list, optional</span> |
| <span class="sd"> names of partitioning columns</span> |
| <span class="sd"> compression : str, optional</span> |
| <span class="sd"> compression codec to use when saving to file. This can be one of the</span> |
| <span class="sd"> known case-insensitive shorten names (none, uncompressed, snappy, gzip,</span> |
| <span class="sd"> lzo, brotli, lz4, and zstd). This will override</span> |
| <span class="sd"> ``spark.sql.parquet.compression.codec``. If None is set, it uses the</span> |
| <span class="sd"> value specified in ``spark.sql.parquet.compression.codec``.</span> |
| |
| <span class="sd"> Examples</span> |
| <span class="sd"> --------</span> |
| <span class="sd"> >>> df.write.parquet(os.path.join(tempfile.mkdtemp(), 'data'))</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">mode</span><span class="p">(</span><span class="n">mode</span><span class="p">)</span> |
| <span class="k">if</span> <span class="n">partitionBy</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">partitionBy</span><span class="p">(</span><span class="n">partitionBy</span><span class="p">)</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_set_opts</span><span class="p">(</span><span class="n">compression</span><span class="o">=</span><span class="n">compression</span><span class="p">)</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span><span class="o">.</span><span class="n">parquet</span><span class="p">(</span><span class="n">path</span><span class="p">)</span></div> |
| |
| <div class="viewcode-block" id="DataFrameWriter.text"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameWriter.text.html#pyspark.sql.DataFrameWriter.text">[docs]</a> <span class="k">def</span> <span class="nf">text</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">path</span><span class="p">,</span> <span class="n">compression</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">lineSep</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span> |
| <span class="sd">"""Saves the content of the DataFrame in a text file at the specified path.</span> |
| <span class="sd"> The text files will be encoded as UTF-8.</span> |
| |
| <span class="sd"> .. versionadded:: 1.6.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> path : str</span> |
| <span class="sd"> the path in any Hadoop supported file system</span> |
| <span class="sd"> compression : str, optional</span> |
| <span class="sd"> compression codec to use when saving to file. This can be one of the</span> |
| <span class="sd"> known case-insensitive shorten names (none, bzip2, gzip, lz4,</span> |
| <span class="sd"> snappy and deflate).</span> |
| <span class="sd"> lineSep : str, optional</span> |
| <span class="sd"> defines the line separator that should be used for writing. If None is</span> |
| <span class="sd"> set, it uses the default value, ``\\n``.</span> |
| |
| <span class="sd"> The DataFrame must have only one column that is of string type.</span> |
| <span class="sd"> Each row becomes a new line in the output file.</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_set_opts</span><span class="p">(</span><span class="n">compression</span><span class="o">=</span><span class="n">compression</span><span class="p">,</span> <span class="n">lineSep</span><span class="o">=</span><span class="n">lineSep</span><span class="p">)</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span><span class="o">.</span><span class="n">text</span><span class="p">(</span><span class="n">path</span><span class="p">)</span></div> |
| |
| <div class="viewcode-block" id="DataFrameWriter.csv"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameWriter.csv.html#pyspark.sql.DataFrameWriter.csv">[docs]</a> <span class="k">def</span> <span class="nf">csv</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">path</span><span class="p">,</span> <span class="n">mode</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">compression</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">sep</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">quote</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">escape</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">header</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">nullValue</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">escapeQuotes</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">quoteAll</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">dateFormat</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">timestampFormat</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">ignoreLeadingWhiteSpace</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">ignoreTrailingWhiteSpace</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> |
| <span class="n">charToEscapeQuoteEscaping</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">encoding</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">emptyValue</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">lineSep</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span> |
| <span class="sa">r</span><span class="sd">"""Saves the content of the :class:`DataFrame` in CSV format at the specified path.</span> |
| |
| <span class="sd"> .. versionadded:: 2.0.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> path : str</span> |
| <span class="sd"> the path in any Hadoop supported file system</span> |
| <span class="sd"> mode : str, optional</span> |
| <span class="sd"> specifies the behavior of the save operation when data already exists.</span> |
| |
| <span class="sd"> * ``append``: Append contents of this :class:`DataFrame` to existing data.</span> |
| <span class="sd"> * ``overwrite``: Overwrite existing data.</span> |
| <span class="sd"> * ``ignore``: Silently ignore this operation if data already exists.</span> |
| <span class="sd"> * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \</span> |
| <span class="sd"> exists.</span> |
| |
| <span class="sd"> compression : str, optional</span> |
| <span class="sd"> compression codec to use when saving to file. This can be one of the</span> |
| <span class="sd"> known case-insensitive shorten names (none, bzip2, gzip, lz4,</span> |
| <span class="sd"> snappy and deflate).</span> |
| <span class="sd"> sep : str, optional</span> |
| <span class="sd"> sets a separator (one or more characters) for each field and value. If None is</span> |
| <span class="sd"> set, it uses the default value, ``,``.</span> |
| <span class="sd"> quote : str, optional</span> |
| <span class="sd"> sets a single character used for escaping quoted values where the</span> |
| <span class="sd"> separator can be part of the value. If None is set, it uses the default</span> |
| <span class="sd"> value, ``"``. If an empty string is set, it uses ``u0000`` (null character).</span> |
| <span class="sd"> escape : str, optional</span> |
| <span class="sd"> sets a single character used for escaping quotes inside an already</span> |
| <span class="sd"> quoted value. If None is set, it uses the default value, ``\``</span> |
| <span class="sd"> escapeQuotes : str or bool, optional</span> |
| <span class="sd"> a flag indicating whether values containing quotes should always</span> |
| <span class="sd"> be enclosed in quotes. If None is set, it uses the default value</span> |
| <span class="sd"> ``true``, escaping all values containing a quote character.</span> |
| <span class="sd"> quoteAll : str or bool, optional</span> |
| <span class="sd"> a flag indicating whether all values should always be enclosed in</span> |
| <span class="sd"> quotes. If None is set, it uses the default value ``false``,</span> |
| <span class="sd"> only escaping values containing a quote character.</span> |
| <span class="sd"> header : str or bool, optional</span> |
| <span class="sd"> writes the names of columns as the first line. If None is set, it uses</span> |
| <span class="sd"> the default value, ``false``.</span> |
| <span class="sd"> nullValue : str, optional</span> |
| <span class="sd"> sets the string representation of a null value. If None is set, it uses</span> |
| <span class="sd"> the default value, empty string.</span> |
| <span class="sd"> dateFormat : str, optional</span> |
| <span class="sd"> sets the string that indicates a date format. Custom date formats follow</span> |
| <span class="sd"> the formats at</span> |
| <span class="sd"> `datetime pattern <https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html>`_. # noqa</span> |
| <span class="sd"> This applies to date type. If None is set, it uses the</span> |
| <span class="sd"> default value, ``yyyy-MM-dd``.</span> |
| <span class="sd"> timestampFormat : str, optional</span> |
| <span class="sd"> sets the string that indicates a timestamp format.</span> |
| <span class="sd"> Custom date formats follow the formats at</span> |
| <span class="sd"> `datetime pattern <https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html>`_. # noqa</span> |
| <span class="sd"> This applies to timestamp type. If None is set, it uses the</span> |
| <span class="sd"> default value, ``yyyy-MM-dd'T'HH:mm:ss[.SSS][XXX]``.</span> |
| <span class="sd"> ignoreLeadingWhiteSpace : str or bool, optional</span> |
| <span class="sd"> a flag indicating whether or not leading whitespaces from</span> |
| <span class="sd"> values being written should be skipped. If None is set, it</span> |
| <span class="sd"> uses the default value, ``true``.</span> |
| <span class="sd"> ignoreTrailingWhiteSpace : str or bool, optional</span> |
| <span class="sd"> a flag indicating whether or not trailing whitespaces from</span> |
| <span class="sd"> values being written should be skipped. If None is set, it</span> |
| <span class="sd"> uses the default value, ``true``.</span> |
| <span class="sd"> charToEscapeQuoteEscaping : str, optional</span> |
| <span class="sd"> sets a single character used for escaping the escape for</span> |
| <span class="sd"> the quote character. If None is set, the default value is</span> |
| <span class="sd"> escape character when escape and quote characters are</span> |
| <span class="sd"> different, ``\0`` otherwise..</span> |
| <span class="sd"> encoding : str, optional</span> |
| <span class="sd"> sets the encoding (charset) of saved csv files. If None is set,</span> |
| <span class="sd"> the default UTF-8 charset will be used.</span> |
| <span class="sd"> emptyValue : str, optional</span> |
| <span class="sd"> sets the string representation of an empty value. If None is set, it uses</span> |
| <span class="sd"> the default value, ``""``.</span> |
| <span class="sd"> lineSep : str, optional</span> |
| <span class="sd"> defines the line separator that should be used for writing. If None is</span> |
| <span class="sd"> set, it uses the default value, ``\\n``. Maximum length is 1 character.</span> |
| |
| <span class="sd"> Examples</span> |
| <span class="sd"> --------</span> |
| <span class="sd"> >>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data'))</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">mode</span><span class="p">(</span><span class="n">mode</span><span class="p">)</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_set_opts</span><span class="p">(</span><span class="n">compression</span><span class="o">=</span><span class="n">compression</span><span class="p">,</span> <span class="n">sep</span><span class="o">=</span><span class="n">sep</span><span class="p">,</span> <span class="n">quote</span><span class="o">=</span><span class="n">quote</span><span class="p">,</span> <span class="n">escape</span><span class="o">=</span><span class="n">escape</span><span class="p">,</span> <span class="n">header</span><span class="o">=</span><span class="n">header</span><span class="p">,</span> |
| <span class="n">nullValue</span><span class="o">=</span><span class="n">nullValue</span><span class="p">,</span> <span class="n">escapeQuotes</span><span class="o">=</span><span class="n">escapeQuotes</span><span class="p">,</span> <span class="n">quoteAll</span><span class="o">=</span><span class="n">quoteAll</span><span class="p">,</span> |
| <span class="n">dateFormat</span><span class="o">=</span><span class="n">dateFormat</span><span class="p">,</span> <span class="n">timestampFormat</span><span class="o">=</span><span class="n">timestampFormat</span><span class="p">,</span> |
| <span class="n">ignoreLeadingWhiteSpace</span><span class="o">=</span><span class="n">ignoreLeadingWhiteSpace</span><span class="p">,</span> |
| <span class="n">ignoreTrailingWhiteSpace</span><span class="o">=</span><span class="n">ignoreTrailingWhiteSpace</span><span class="p">,</span> |
| <span class="n">charToEscapeQuoteEscaping</span><span class="o">=</span><span class="n">charToEscapeQuoteEscaping</span><span class="p">,</span> |
| <span class="n">encoding</span><span class="o">=</span><span class="n">encoding</span><span class="p">,</span> <span class="n">emptyValue</span><span class="o">=</span><span class="n">emptyValue</span><span class="p">,</span> <span class="n">lineSep</span><span class="o">=</span><span class="n">lineSep</span><span class="p">)</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span><span class="o">.</span><span class="n">csv</span><span class="p">(</span><span class="n">path</span><span class="p">)</span></div> |
| |
| <div class="viewcode-block" id="DataFrameWriter.orc"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameWriter.orc.html#pyspark.sql.DataFrameWriter.orc">[docs]</a> <span class="k">def</span> <span class="nf">orc</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">path</span><span class="p">,</span> <span class="n">mode</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">partitionBy</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">compression</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span> |
| <span class="sd">"""Saves the content of the :class:`DataFrame` in ORC format at the specified path.</span> |
| |
| <span class="sd"> .. versionadded:: 1.5.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> path : str</span> |
| <span class="sd"> the path in any Hadoop supported file system</span> |
| <span class="sd"> mode : str, optional</span> |
| <span class="sd"> specifies the behavior of the save operation when data already exists.</span> |
| |
| <span class="sd"> * ``append``: Append contents of this :class:`DataFrame` to existing data.</span> |
| <span class="sd"> * ``overwrite``: Overwrite existing data.</span> |
| <span class="sd"> * ``ignore``: Silently ignore this operation if data already exists.</span> |
| <span class="sd"> * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \</span> |
| <span class="sd"> exists.</span> |
| <span class="sd"> partitionBy : str or list, optional</span> |
| <span class="sd"> names of partitioning columns</span> |
| <span class="sd"> compression : str, optional</span> |
| <span class="sd"> compression codec to use when saving to file. This can be one of the</span> |
| <span class="sd"> known case-insensitive shorten names (none, snappy, zlib, and lzo).</span> |
| <span class="sd"> This will override ``orc.compress`` and</span> |
| <span class="sd"> ``spark.sql.orc.compression.codec``. If None is set, it uses the value</span> |
| <span class="sd"> specified in ``spark.sql.orc.compression.codec``.</span> |
| |
| <span class="sd"> Examples</span> |
| <span class="sd"> --------</span> |
| <span class="sd"> >>> orc_df = spark.read.orc('python/test_support/sql/orc_partitioned')</span> |
| <span class="sd"> >>> orc_df.write.orc(os.path.join(tempfile.mkdtemp(), 'data'))</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">mode</span><span class="p">(</span><span class="n">mode</span><span class="p">)</span> |
| <span class="k">if</span> <span class="n">partitionBy</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">partitionBy</span><span class="p">(</span><span class="n">partitionBy</span><span class="p">)</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_set_opts</span><span class="p">(</span><span class="n">compression</span><span class="o">=</span><span class="n">compression</span><span class="p">)</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwrite</span><span class="o">.</span><span class="n">orc</span><span class="p">(</span><span class="n">path</span><span class="p">)</span></div> |
| |
| <div class="viewcode-block" id="DataFrameWriter.jdbc"><a class="viewcode-back" href="../../../reference/api/pyspark.sql.DataFrameWriter.jdbc.html#pyspark.sql.DataFrameWriter.jdbc">[docs]</a> <span class="k">def</span> <span class="nf">jdbc</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">url</span><span class="p">,</span> <span class="n">table</span><span class="p">,</span> <span class="n">mode</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">properties</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span> |
| <span class="sd">"""Saves the content of the :class:`DataFrame` to an external database table via JDBC.</span> |
| |
| <span class="sd"> .. versionadded:: 1.4.0</span> |
| |
| <span class="sd"> Parameters</span> |
| <span class="sd"> ----------</span> |
| <span class="sd"> url : str</span> |
| <span class="sd"> a JDBC URL of the form ``jdbc:subprotocol:subname``</span> |
| <span class="sd"> table : str</span> |
| <span class="sd"> Name of the table in the external database.</span> |
| <span class="sd"> mode : str, optional</span> |
| <span class="sd"> specifies the behavior of the save operation when data already exists.</span> |
| |
| <span class="sd"> * ``append``: Append contents of this :class:`DataFrame` to existing data.</span> |
| <span class="sd"> * ``overwrite``: Overwrite existing data.</span> |
| <span class="sd"> * ``ignore``: Silently ignore this operation if data already exists.</span> |
| <span class="sd"> * ``error`` or ``errorifexists`` (default case): Throw an exception if data already \</span> |
| <span class="sd"> exists.</span> |
| <span class="sd"> properties : dict</span> |
| <span class="sd"> a dictionary of JDBC database connection arguments. Normally at</span> |
| <span class="sd"> least properties "user" and "password" with their corresponding values.</span> |
| <span class="sd"> For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }</span> |
| |
| <span class="sd"> Notes</span> |
| <span class="sd"> -----</span> |
| <span class="sd"> Don't create too many partitions in parallel on a large cluster;</span> |
| <span class="sd"> otherwise Spark might crash your external database systems.</span> |
| <span class="sd"> """</span> |
| <span class="k">if</span> <span class="n">properties</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span> |
| <span class="n">properties</span> <span class="o">=</span> <span class="nb">dict</span><span class="p">()</span> |
| <span class="n">jprop</span> <span class="o">=</span> <span class="n">JavaClass</span><span class="p">(</span><span class="s2">"java.util.Properties"</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">_spark</span><span class="o">.</span><span class="n">_sc</span><span class="o">.</span><span class="n">_gateway</span><span class="o">.</span><span class="n">_gateway_client</span><span class="p">)()</span> |
| <span class="k">for</span> <span class="n">k</span> <span class="ow">in</span> <span class="n">properties</span><span class="p">:</span> |
| <span class="n">jprop</span><span class="o">.</span><span class="n">setProperty</span><span class="p">(</span><span class="n">k</span><span class="p">,</span> <span class="n">properties</span><span class="p">[</span><span class="n">k</span><span class="p">])</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">mode</span><span class="p">(</span><span class="n">mode</span><span class="p">)</span><span class="o">.</span><span class="n">_jwrite</span><span class="o">.</span><span class="n">jdbc</span><span class="p">(</span><span class="n">url</span><span class="p">,</span> <span class="n">table</span><span class="p">,</span> <span class="n">jprop</span><span class="p">)</span></div> |
| |
| |
| <span class="k">class</span> <span class="nc">DataFrameWriterV2</span><span class="p">(</span><span class="nb">object</span><span class="p">):</span> |
| <span class="sd">"""</span> |
| <span class="sd"> Interface used to write a class:`pyspark.sql.dataframe.DataFrame`</span> |
| <span class="sd"> to external storage using the v2 API.</span> |
| |
| <span class="sd"> .. versionadded:: 3.1.0</span> |
| <span class="sd"> """</span> |
| |
| <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">df</span><span class="p">,</span> <span class="n">table</span><span class="p">):</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_df</span> <span class="o">=</span> <span class="n">df</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_spark</span> <span class="o">=</span> <span class="n">df</span><span class="o">.</span><span class="n">sql_ctx</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwriter</span> <span class="o">=</span> <span class="n">df</span><span class="o">.</span><span class="n">_jdf</span><span class="o">.</span><span class="n">writeTo</span><span class="p">(</span><span class="n">table</span><span class="p">)</span> |
| |
| <span class="nd">@since</span><span class="p">(</span><span class="mf">3.1</span><span class="p">)</span> |
| <span class="k">def</span> <span class="nf">using</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">provider</span><span class="p">):</span> |
| <span class="sd">"""</span> |
| <span class="sd"> Specifies a provider for the underlying output data source.</span> |
| <span class="sd"> Spark's default catalog supports "parquet", "json", etc.</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwriter</span><span class="o">.</span><span class="n">using</span><span class="p">(</span><span class="n">provider</span><span class="p">)</span> |
| <span class="k">return</span> <span class="bp">self</span> |
| |
| <span class="nd">@since</span><span class="p">(</span><span class="mf">3.1</span><span class="p">)</span> |
| <span class="k">def</span> <span class="nf">option</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">key</span><span class="p">,</span> <span class="n">value</span><span class="p">):</span> |
| <span class="sd">"""</span> |
| <span class="sd"> Add a write option.</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwriter</span><span class="o">.</span><span class="n">option</span><span class="p">(</span><span class="n">key</span><span class="p">,</span> <span class="n">to_str</span><span class="p">(</span><span class="n">value</span><span class="p">))</span> |
| <span class="k">return</span> <span class="bp">self</span> |
| |
| <span class="nd">@since</span><span class="p">(</span><span class="mf">3.1</span><span class="p">)</span> |
| <span class="k">def</span> <span class="nf">options</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">**</span><span class="n">options</span><span class="p">):</span> |
| <span class="sd">"""</span> |
| <span class="sd"> Add write options.</span> |
| <span class="sd"> """</span> |
| <span class="n">options</span> <span class="o">=</span> <span class="p">{</span><span class="n">k</span><span class="p">:</span> <span class="n">to_str</span><span class="p">(</span><span class="n">v</span><span class="p">)</span> <span class="k">for</span> <span class="n">k</span><span class="p">,</span> <span class="n">v</span> <span class="ow">in</span> <span class="n">options</span><span class="o">.</span><span class="n">items</span><span class="p">()}</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwriter</span><span class="o">.</span><span class="n">options</span><span class="p">(</span><span class="n">options</span><span class="p">)</span> |
| <span class="k">return</span> <span class="bp">self</span> |
| |
| <span class="nd">@since</span><span class="p">(</span><span class="mf">3.1</span><span class="p">)</span> |
| <span class="k">def</span> <span class="nf">tableProperty</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">property</span><span class="p">,</span> <span class="n">value</span><span class="p">):</span> |
| <span class="sd">"""</span> |
| <span class="sd"> Add table property.</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwriter</span><span class="o">.</span><span class="n">tableProperty</span><span class="p">(</span><span class="nb">property</span><span class="p">,</span> <span class="n">value</span><span class="p">)</span> |
| <span class="k">return</span> <span class="bp">self</span> |
| |
| <span class="nd">@since</span><span class="p">(</span><span class="mf">3.1</span><span class="p">)</span> |
| <span class="k">def</span> <span class="nf">partitionedBy</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">col</span><span class="p">,</span> <span class="o">*</span><span class="n">cols</span><span class="p">):</span> |
| <span class="sd">"""</span> |
| <span class="sd"> Partition the output table created by `create`, `createOrReplace`, or `replace` using</span> |
| <span class="sd"> the given columns or transforms.</span> |
| |
| <span class="sd"> When specified, the table data will be stored by these values for efficient reads.</span> |
| |
| <span class="sd"> For example, when a table is partitioned by day, it may be stored</span> |
| <span class="sd"> in a directory layout like:</span> |
| |
| <span class="sd"> * `table/day=2019-06-01/`</span> |
| <span class="sd"> * `table/day=2019-06-02/`</span> |
| |
| <span class="sd"> Partitioning is one of the most widely used techniques to optimize physical data layout.</span> |
| <span class="sd"> It provides a coarse-grained index for skipping unnecessary data reads when queries have</span> |
| <span class="sd"> predicates on the partitioned columns. In order for partitioning to work well, the number</span> |
| <span class="sd"> of distinct values in each column should typically be less than tens of thousands.</span> |
| |
| <span class="sd"> `col` and `cols` support only the following functions:</span> |
| |
| <span class="sd"> * :py:func:`pyspark.sql.functions.years`</span> |
| <span class="sd"> * :py:func:`pyspark.sql.functions.months`</span> |
| <span class="sd"> * :py:func:`pyspark.sql.functions.days`</span> |
| <span class="sd"> * :py:func:`pyspark.sql.functions.hours`</span> |
| <span class="sd"> * :py:func:`pyspark.sql.functions.bucket`</span> |
| |
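| <span class="sd"> A minimal illustrative sketch (the table and column names are hypothetical</span> |
| <span class="sd"> placeholders):</span> |
| |
| <span class="sd"> &gt;&gt;&gt; from pyspark.sql.functions import years, bucket</span> |
| <span class="sd"> &gt;&gt;&gt; df.writeTo('catalog.db.table').partitionedBy(  # doctest: +SKIP</span> |
| <span class="sd"> ...     years('ts'), bucket(16, 'id')).create()</span> |
| |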
| <span class="sd"> """</span> |
| <span class="n">col</span> <span class="o">=</span> <span class="n">_to_java_column</span><span class="p">(</span><span class="n">col</span><span class="p">)</span> |
| <span class="n">cols</span> <span class="o">=</span> <span class="n">_to_seq</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_spark</span><span class="o">.</span><span class="n">_sc</span><span class="p">,</span> <span class="p">[</span><span class="n">_to_java_column</span><span class="p">(</span><span class="n">c</span><span class="p">)</span> <span class="k">for</span> <span class="n">c</span> <span class="ow">in</span> <span class="n">cols</span><span class="p">])</span> |
| <span class="k">return</span> <span class="bp">self</span> |
| |
| <span class="nd">@since</span><span class="p">(</span><span class="mf">3.1</span><span class="p">)</span> |
| <span class="k">def</span> <span class="nf">create</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span> |
| <span class="sd">"""</span> |
| <span class="sd"> Create a new table from the contents of the data frame.</span> |
| |
| <span class="sd"> The new table's schema, partition layout, properties, and other configuration will be</span> |
| <span class="sd"> based on the configuration set on this writer.</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwriter</span><span class="o">.</span><span class="n">create</span><span class="p">()</span> |
| |
| <span class="nd">@since</span><span class="p">(</span><span class="mf">3.1</span><span class="p">)</span> |
| <span class="k">def</span> <span class="nf">replace</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span> |
| <span class="sd">"""</span> |
| <span class="sd"> Replace an existing table with the contents of the data frame.</span> |
| |
| <span class="sd"> The existing table's schema, partition layout, properties, and other configuration will be</span> |
| <span class="sd"> replaced with the contents of the data frame and the configuration set on this writer.</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwriter</span><span class="o">.</span><span class="n">replace</span><span class="p">()</span> |
| |
| <span class="nd">@since</span><span class="p">(</span><span class="mf">3.1</span><span class="p">)</span> |
| <span class="k">def</span> <span class="nf">createOrReplace</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span> |
| <span class="sd">"""</span> |
| <span class="sd"> Create a new table or replace an existing table with the contents of the data frame.</span> |
| |
| <span class="sd"> The output table's schema, partition layout, properties,</span> |
| <span class="sd"> and other configuration will be based on the contents of the data frame</span> |
| <span class="sd"> and the configuration set on this writer.</span> |
| <span class="sd"> If the table exists, its configuration and data will be replaced.</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwriter</span><span class="o">.</span><span class="n">createOrReplace</span><span class="p">()</span> |
| |
| <span class="nd">@since</span><span class="p">(</span><span class="mf">3.1</span><span class="p">)</span> |
| <span class="k">def</span> <span class="nf">append</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span> |
| <span class="sd">"""</span> |
| <span class="sd"> Append the contents of the data frame to the output table.</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwriter</span><span class="o">.</span><span class="n">append</span><span class="p">()</span> |
| |
| <span class="nd">@since</span><span class="p">(</span><span class="mf">3.1</span><span class="p">)</span> |
| <span class="k">def</span> <span class="nf">overwrite</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">condition</span><span class="p">):</span> |
| <span class="sd">"""</span> |
| <span class="sd"> Overwrite rows matching the given filter condition with the contents of the data frame in</span> |
| <span class="sd"> the output table.</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwriter</span><span class="o">.</span><span class="n">overwrite</span><span class="p">(</span><span class="n">condition</span><span class="p">)</span> |
| |
| <span class="nd">@since</span><span class="p">(</span><span class="mf">3.1</span><span class="p">)</span> |
| <span class="k">def</span> <span class="nf">overwritePartitions</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span> |
| <span class="sd">"""</span> |
| <span class="sd"> Overwrite all partition for which the data frame contains at least one row with the contents</span> |
| <span class="sd"> of the data frame in the output table.</span> |
| |
| <span class="sd"> This operation is equivalent to Hive's `INSERT OVERWRITE ... PARTITION`, which replaces</span> |
| <span class="sd"> partitions dynamically depending on the contents of the data frame.</span> |
| <span class="sd"> """</span> |
| <span class="bp">self</span><span class="o">.</span><span class="n">_jwriter</span><span class="o">.</span><span class="n">overwritePartitions</span><span class="p">()</span> |
| |
| |
| <span class="k">def</span> <span class="nf">_test</span><span class="p">():</span> |
| <span class="kn">import</span> <span class="nn">doctest</span> |
| <span class="kn">import</span> <span class="nn">os</span> |
| <span class="kn">import</span> <span class="nn">tempfile</span> |
| <span class="kn">import</span> <span class="nn">py4j</span> |
| <span class="kn">from</span> <span class="nn">pyspark.context</span> <span class="kn">import</span> <span class="n">SparkContext</span> |
| <span class="kn">from</span> <span class="nn">pyspark.sql</span> <span class="kn">import</span> <span class="n">SparkSession</span> |
| <span class="kn">import</span> <span class="nn">pyspark.sql.readwriter</span> |
| |
| <span class="n">os</span><span class="o">.</span><span class="n">chdir</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="p">[</span><span class="s2">"SPARK_HOME"</span><span class="p">])</span> |
| |
| <span class="n">globs</span> <span class="o">=</span> <span class="n">pyspark</span><span class="o">.</span><span class="n">sql</span><span class="o">.</span><span class="n">readwriter</span><span class="o">.</span><span class="vm">__dict__</span><span class="o">.</span><span class="n">copy</span><span class="p">()</span> |
| <span class="n">sc</span> <span class="o">=</span> <span class="n">SparkContext</span><span class="p">(</span><span class="s1">'local[4]'</span><span class="p">,</span> <span class="s1">'PythonTest'</span><span class="p">)</span> |
| <span class="k">try</span><span class="p">:</span> |
| <span class="n">spark</span> <span class="o">=</span> <span class="n">SparkSession</span><span class="o">.</span><span class="n">builder</span><span class="o">.</span><span class="n">getOrCreate</span><span class="p">()</span> |
| <span class="k">except</span> <span class="n">py4j</span><span class="o">.</span><span class="n">protocol</span><span class="o">.</span><span class="n">Py4JError</span><span class="p">:</span> |
| <span class="n">spark</span> <span class="o">=</span> <span class="n">SparkSession</span><span class="p">(</span><span class="n">sc</span><span class="p">)</span> |
| |
| <span class="n">globs</span><span class="p">[</span><span class="s1">'tempfile'</span><span class="p">]</span> <span class="o">=</span> <span class="n">tempfile</span> |
| <span class="n">globs</span><span class="p">[</span><span class="s1">'os'</span><span class="p">]</span> <span class="o">=</span> <span class="n">os</span> |
| <span class="n">globs</span><span class="p">[</span><span class="s1">'sc'</span><span class="p">]</span> <span class="o">=</span> <span class="n">sc</span> |
| <span class="n">globs</span><span class="p">[</span><span class="s1">'spark'</span><span class="p">]</span> <span class="o">=</span> <span class="n">spark</span> |
| <span class="n">globs</span><span class="p">[</span><span class="s1">'df'</span><span class="p">]</span> <span class="o">=</span> <span class="n">spark</span><span class="o">.</span><span class="n">read</span><span class="o">.</span><span class="n">parquet</span><span class="p">(</span><span class="s1">'python/test_support/sql/parquet_partitioned'</span><span class="p">)</span> |
| <span class="p">(</span><span class="n">failure_count</span><span class="p">,</span> <span class="n">test_count</span><span class="p">)</span> <span class="o">=</span> <span class="n">doctest</span><span class="o">.</span><span class="n">testmod</span><span class="p">(</span> |
| <span class="n">pyspark</span><span class="o">.</span><span class="n">sql</span><span class="o">.</span><span class="n">readwriter</span><span class="p">,</span> <span class="n">globs</span><span class="o">=</span><span class="n">globs</span><span class="p">,</span> |
| <span class="n">optionflags</span><span class="o">=</span><span class="n">doctest</span><span class="o">.</span><span class="n">ELLIPSIS</span> <span class="o">|</span> <span class="n">doctest</span><span class="o">.</span><span class="n">NORMALIZE_WHITESPACE</span> <span class="o">|</span> <span class="n">doctest</span><span class="o">.</span><span class="n">REPORT_NDIFF</span><span class="p">)</span> |
| <span class="n">sc</span><span class="o">.</span><span class="n">stop</span><span class="p">()</span> |
| <span class="k">if</span> <span class="n">failure_count</span><span class="p">:</span> |
| <span class="n">sys</span><span class="o">.</span><span class="n">exit</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">)</span> |
| |
| |
| <span class="k">if</span> <span class="vm">__name__</span> <span class="o">==</span> <span class="s2">"__main__"</span><span class="p">:</span> |
| <span class="n">_test</span><span class="p">()</span> |
| </pre></div> |
| |
| </div> |
| |
| |
| <div class='prev-next-bottom'> |
| |
| |
| </div> |
| |
| </main> |
| |
| |
| </div> |
| </div> |
| |
| |
| <script src="../../../_static/js/index.3da636dd464baa7582d2.js"></script> |
| |
| |
| <footer class="footer mt-5 mt-md-0"> |
| <div class="container"> |
| <p> |
| Created using <a href="http://sphinx-doc.org/">Sphinx</a> 3.0.4.<br/> |
| </p> |
| </div> |
| </footer> |
| </body> |
| </html> |