Source code for pyspark.context (PySpark 3.3.1 documentation)

#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
<span class="kn">import</span> <span class="nn">os</span>
<span class="kn">import</span> <span class="nn">shutil</span>
<span class="kn">import</span> <span class="nn">signal</span>
<span class="kn">import</span> <span class="nn">sys</span>
<span class="kn">import</span> <span class="nn">threading</span>
<span class="kn">import</span> <span class="nn">warnings</span>
<span class="kn">import</span> <span class="nn">importlib</span>
<span class="kn">from</span> <span class="nn">threading</span> <span class="kn">import</span> <span class="n">RLock</span>
<span class="kn">from</span> <span class="nn">tempfile</span> <span class="kn">import</span> <span class="n">NamedTemporaryFile</span>
<span class="kn">from</span> <span class="nn">types</span> <span class="kn">import</span> <span class="n">TracebackType</span>
<span class="kn">from</span> <span class="nn">typing</span> <span class="kn">import</span> <span class="p">(</span>
<span class="n">Any</span><span class="p">,</span>
<span class="n">Callable</span><span class="p">,</span>
<span class="n">cast</span><span class="p">,</span>
<span class="n">ClassVar</span><span class="p">,</span>
<span class="n">Dict</span><span class="p">,</span>
<span class="n">Iterable</span><span class="p">,</span>
<span class="n">List</span><span class="p">,</span>
<span class="n">NoReturn</span><span class="p">,</span>
<span class="n">Optional</span><span class="p">,</span>
<span class="n">Sequence</span><span class="p">,</span>
<span class="n">Tuple</span><span class="p">,</span>
<span class="n">Type</span><span class="p">,</span>
<span class="n">TYPE_CHECKING</span><span class="p">,</span>
<span class="n">TypeVar</span><span class="p">,</span>
<span class="p">)</span>
<span class="kn">from</span> <span class="nn">py4j.java_collections</span> <span class="kn">import</span> <span class="n">JavaMap</span>
<span class="kn">from</span> <span class="nn">py4j.protocol</span> <span class="kn">import</span> <span class="n">Py4JError</span>
<span class="kn">from</span> <span class="nn">pyspark</span> <span class="kn">import</span> <span class="n">accumulators</span><span class="p">,</span> <span class="n">since</span>
<span class="kn">from</span> <span class="nn">pyspark.accumulators</span> <span class="kn">import</span> <span class="n">Accumulator</span>
<span class="kn">from</span> <span class="nn">pyspark.broadcast</span> <span class="kn">import</span> <span class="n">Broadcast</span><span class="p">,</span> <span class="n">BroadcastPickleRegistry</span>
<span class="kn">from</span> <span class="nn">pyspark.conf</span> <span class="kn">import</span> <span class="n">SparkConf</span>
<span class="kn">from</span> <span class="nn">pyspark.files</span> <span class="kn">import</span> <span class="n">SparkFiles</span>
<span class="kn">from</span> <span class="nn">pyspark.java_gateway</span> <span class="kn">import</span> <span class="n">launch_gateway</span><span class="p">,</span> <span class="n">local_connect_and_auth</span>
<span class="kn">from</span> <span class="nn">pyspark.serializers</span> <span class="kn">import</span> <span class="p">(</span>
<span class="n">CPickleSerializer</span><span class="p">,</span>
<span class="n">BatchedSerializer</span><span class="p">,</span>
<span class="n">Serializer</span><span class="p">,</span>
<span class="n">UTF8Deserializer</span><span class="p">,</span>
<span class="n">PairDeserializer</span><span class="p">,</span>
<span class="n">AutoBatchedSerializer</span><span class="p">,</span>
<span class="n">NoOpSerializer</span><span class="p">,</span>
<span class="n">ChunkedStream</span><span class="p">,</span>
<span class="p">)</span>
<span class="kn">from</span> <span class="nn">pyspark.storagelevel</span> <span class="kn">import</span> <span class="n">StorageLevel</span>
<span class="kn">from</span> <span class="nn">pyspark.resource.information</span> <span class="kn">import</span> <span class="n">ResourceInformation</span>
<span class="kn">from</span> <span class="nn">pyspark.rdd</span> <span class="kn">import</span> <span class="n">RDD</span><span class="p">,</span> <span class="n">_load_from_socket</span>
<span class="kn">from</span> <span class="nn">pyspark.taskcontext</span> <span class="kn">import</span> <span class="n">TaskContext</span>
<span class="kn">from</span> <span class="nn">pyspark.traceback_utils</span> <span class="kn">import</span> <span class="n">CallSite</span><span class="p">,</span> <span class="n">first_spark_call</span>
<span class="kn">from</span> <span class="nn">pyspark.status</span> <span class="kn">import</span> <span class="n">StatusTracker</span>
<span class="kn">from</span> <span class="nn">pyspark.profiler</span> <span class="kn">import</span> <span class="n">ProfilerCollector</span><span class="p">,</span> <span class="n">BasicProfiler</span><span class="p">,</span> <span class="n">UDFBasicProfiler</span>
<span class="kn">from</span> <span class="nn">py4j.java_gateway</span> <span class="kn">import</span> <span class="n">is_instance_of</span><span class="p">,</span> <span class="n">JavaGateway</span><span class="p">,</span> <span class="n">JavaObject</span><span class="p">,</span> <span class="n">JVMView</span>
<span class="k">if</span> <span class="n">TYPE_CHECKING</span><span class="p">:</span>
<span class="kn">from</span> <span class="nn">pyspark.accumulators</span> <span class="kn">import</span> <span class="n">AccumulatorParam</span>
<span class="n">__all__</span> <span class="o">=</span> <span class="p">[</span><span class="s2">&quot;SparkContext&quot;</span><span class="p">]</span>
<span class="c1"># These are special default configs for PySpark, they will overwrite</span>
<span class="c1"># the default ones for Spark if they are not configured by user.</span>
<span class="n">DEFAULT_CONFIGS</span><span class="p">:</span> <span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">Any</span><span class="p">]</span> <span class="o">=</span> <span class="p">{</span>
<span class="s2">&quot;spark.serializer.objectStreamReset&quot;</span><span class="p">:</span> <span class="mi">100</span><span class="p">,</span>
<span class="s2">&quot;spark.rdd.compress&quot;</span><span class="p">:</span> <span class="kc">True</span><span class="p">,</span>
<span class="p">}</span>
<span class="n">T</span> <span class="o">=</span> <span class="n">TypeVar</span><span class="p">(</span><span class="s2">&quot;T&quot;</span><span class="p">)</span>
<span class="n">U</span> <span class="o">=</span> <span class="n">TypeVar</span><span class="p">(</span><span class="s2">&quot;U&quot;</span><span class="p">)</span>
<div class="viewcode-block" id="SparkContext"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.html#pyspark.SparkContext">[docs]</a><span class="k">class</span> <span class="nc">SparkContext</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Main entry point for Spark functionality. A SparkContext represents the</span>
<span class="sd"> connection to a Spark cluster, and can be used to create :class:`RDD` and</span>
<span class="sd"> broadcast variables on that cluster.</span>
<span class="sd"> When you create a new SparkContext, at least the master and app name should</span>
<span class="sd"> be set, either through the named parameters here or through `conf`.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> master : str, optional</span>
<span class="sd"> Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).</span>
<span class="sd"> appName : str, optional</span>
<span class="sd"> A name for your job, to display on the cluster web UI.</span>
<span class="sd"> sparkHome : str, optional</span>
<span class="sd"> Location where Spark is installed on cluster nodes.</span>
<span class="sd"> pyFiles : list, optional</span>
<span class="sd"> Collection of .zip or .py files to send to the cluster</span>
<span class="sd"> and add to PYTHONPATH. These can be paths on the local file</span>
<span class="sd"> system or HDFS, HTTP, HTTPS, or FTP URLs.</span>
<span class="sd"> environment : dict, optional</span>
<span class="sd"> A dictionary of environment variables to set on</span>
<span class="sd"> worker nodes.</span>
<span class="sd"> batchSize : int, optional</span>
<span class="sd"> The number of Python objects represented as a single</span>
<span class="sd"> Java object. Set 1 to disable batching, 0 to automatically choose</span>
<span class="sd"> the batch size based on object sizes, or -1 to use an unlimited</span>
<span class="sd"> batch size</span>
<span class="sd"> serializer : :class:`pyspark.serializers.Serializer`, optional</span>
<span class="sd"> The serializer for RDDs.</span>
<span class="sd"> conf : :py:class:`pyspark.SparkConf`, optional</span>
<span class="sd"> An object setting Spark properties.</span>
<span class="sd"> gateway : :py:class:`py4j.java_gateway.JavaGateway`, optional</span>
<span class="sd"> Use an existing gateway and JVM, otherwise a new JVM</span>
<span class="sd"> will be instantiated. This is only used internally.</span>
<span class="sd"> jsc : :py:class:`py4j.java_gateway.JavaObject`, optional</span>
<span class="sd"> The JavaSparkContext instance. This is only used internally.</span>
<span class="sd"> profiler_cls : type, optional</span>
<span class="sd"> A class of custom Profiler used to do profiling</span>
<span class="sd"> (default is :class:`pyspark.profiler.BasicProfiler`).</span>
<span class="sd"> udf_profiler_cls : type, optional</span>
<span class="sd"> A class of custom Profiler used to do udf profiling</span>
<span class="sd"> (default is :class:`pyspark.profiler.UDFBasicProfiler`).</span>
<span class="sd"> Notes</span>
<span class="sd"> -----</span>
<span class="sd"> Only one :class:`SparkContext` should be active per JVM. You must `stop()`</span>
<span class="sd"> the active :class:`SparkContext` before creating a new one.</span>
<span class="sd"> :class:`SparkContext` instance is not supported to share across multiple</span>
<span class="sd"> processes out of the box, and PySpark does not guarantee multi-processing execution.</span>
<span class="sd"> Use threads instead for concurrent processing purpose.</span>
<span class="sd"> Examples</span>
<span class="sd"> --------</span>
<span class="sd"> &gt;&gt;&gt; from pyspark.context import SparkContext</span>
<span class="sd"> &gt;&gt;&gt; sc = SparkContext(&#39;local&#39;, &#39;test&#39;)</span>
<span class="sd"> &gt;&gt;&gt; sc2 = SparkContext(&#39;local&#39;, &#39;test2&#39;) # doctest: +IGNORE_EXCEPTION_DETAIL</span>
<span class="sd"> Traceback (most recent call last):</span>
<span class="sd"> ...</span>
<span class="sd"> ValueError: ...</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">_gateway</span><span class="p">:</span> <span class="n">ClassVar</span><span class="p">[</span><span class="n">Optional</span><span class="p">[</span><span class="n">JavaGateway</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span>
<span class="n">_jvm</span><span class="p">:</span> <span class="n">ClassVar</span><span class="p">[</span><span class="n">Optional</span><span class="p">[</span><span class="n">JVMView</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span>
<span class="n">_next_accum_id</span> <span class="o">=</span> <span class="mi">0</span>
<span class="n">_active_spark_context</span><span class="p">:</span> <span class="n">ClassVar</span><span class="p">[</span><span class="n">Optional</span><span class="p">[</span><span class="s2">&quot;SparkContext&quot;</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span>
<span class="n">_lock</span> <span class="o">=</span> <span class="n">RLock</span><span class="p">()</span>
<span class="n">_python_includes</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span>
<span class="n">List</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span>
<span class="p">]</span> <span class="o">=</span> <span class="kc">None</span> <span class="c1"># zip and egg files that need to be added to PYTHONPATH</span>
<span class="n">serializer</span><span class="p">:</span> <span class="n">Serializer</span>
<span class="n">profiler_collector</span><span class="p">:</span> <span class="n">ProfilerCollector</span>
<span class="n">PACKAGE_EXTENSIONS</span><span class="p">:</span> <span class="n">Iterable</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="p">(</span><span class="s2">&quot;.zip&quot;</span><span class="p">,</span> <span class="s2">&quot;.egg&quot;</span><span class="p">,</span> <span class="s2">&quot;.jar&quot;</span><span class="p">)</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span>
<span class="n">master</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">appName</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">sparkHome</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">pyFiles</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="nb">str</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">environment</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">Any</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">batchSize</span><span class="p">:</span> <span class="nb">int</span> <span class="o">=</span> <span class="mi">0</span><span class="p">,</span>
<span class="n">serializer</span><span class="p">:</span> <span class="s2">&quot;Serializer&quot;</span> <span class="o">=</span> <span class="n">CPickleSerializer</span><span class="p">(),</span>
<span class="n">conf</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">SparkConf</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">gateway</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">JavaGateway</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">jsc</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">JavaObject</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">profiler_cls</span><span class="p">:</span> <span class="n">Type</span><span class="p">[</span><span class="n">BasicProfiler</span><span class="p">]</span> <span class="o">=</span> <span class="n">BasicProfiler</span><span class="p">,</span>
<span class="n">udf_profiler_cls</span><span class="p">:</span> <span class="n">Type</span><span class="p">[</span><span class="n">UDFBasicProfiler</span><span class="p">]</span> <span class="o">=</span> <span class="n">UDFBasicProfiler</span><span class="p">,</span>
<span class="p">):</span>
<span class="k">if</span> <span class="n">conf</span> <span class="ow">is</span> <span class="kc">None</span> <span class="ow">or</span> <span class="n">conf</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s2">&quot;spark.executor.allowSparkContext&quot;</span><span class="p">,</span> <span class="s2">&quot;false&quot;</span><span class="p">)</span><span class="o">.</span><span class="n">lower</span><span class="p">()</span> <span class="o">!=</span> <span class="s2">&quot;true&quot;</span><span class="p">:</span>
<span class="c1"># In order to prevent SparkContext from being created in executors.</span>
<span class="n">SparkContext</span><span class="o">.</span><span class="n">_assert_on_driver</span><span class="p">()</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_callsite</span> <span class="o">=</span> <span class="n">first_spark_call</span><span class="p">()</span> <span class="ow">or</span> <span class="n">CallSite</span><span class="p">(</span><span class="kc">None</span><span class="p">,</span> <span class="kc">None</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span>
<span class="k">if</span> <span class="n">gateway</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="ow">and</span> <span class="n">gateway</span><span class="o">.</span><span class="n">gateway_parameters</span><span class="o">.</span><span class="n">auth_token</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
<span class="s2">&quot;You are trying to pass an insecure Py4j gateway to Spark. This&quot;</span>
<span class="s2">&quot; is not allowed as it is a security risk.&quot;</span>
<span class="p">)</span>
<span class="n">SparkContext</span><span class="o">.</span><span class="n">_ensure_initialized</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">gateway</span><span class="o">=</span><span class="n">gateway</span><span class="p">,</span> <span class="n">conf</span><span class="o">=</span><span class="n">conf</span><span class="p">)</span>
<span class="k">try</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_do_init</span><span class="p">(</span>
<span class="n">master</span><span class="p">,</span>
<span class="n">appName</span><span class="p">,</span>
<span class="n">sparkHome</span><span class="p">,</span>
<span class="n">pyFiles</span><span class="p">,</span>
<span class="n">environment</span><span class="p">,</span>
<span class="n">batchSize</span><span class="p">,</span>
<span class="n">serializer</span><span class="p">,</span>
<span class="n">conf</span><span class="p">,</span>
<span class="n">jsc</span><span class="p">,</span>
<span class="n">profiler_cls</span><span class="p">,</span>
<span class="n">udf_profiler_cls</span><span class="p">,</span>
<span class="p">)</span>
<span class="k">except</span> <span class="ne">BaseException</span><span class="p">:</span>
<span class="c1"># If an error occurs, clean up in order to allow future SparkContext creation:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">stop</span><span class="p">()</span>
<span class="k">raise</span>
<span class="k">def</span> <span class="nf">_do_init</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span>
<span class="n">master</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">],</span>
<span class="n">appName</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">],</span>
<span class="n">sparkHome</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">],</span>
<span class="n">pyFiles</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">List</span><span class="p">[</span><span class="nb">str</span><span class="p">]],</span>
<span class="n">environment</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">Any</span><span class="p">]],</span>
<span class="n">batchSize</span><span class="p">:</span> <span class="nb">int</span><span class="p">,</span>
<span class="n">serializer</span><span class="p">:</span> <span class="n">Serializer</span><span class="p">,</span>
<span class="n">conf</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">SparkConf</span><span class="p">],</span>
<span class="n">jsc</span><span class="p">:</span> <span class="n">JavaObject</span><span class="p">,</span>
<span class="n">profiler_cls</span><span class="p">:</span> <span class="n">Type</span><span class="p">[</span><span class="n">BasicProfiler</span><span class="p">]</span> <span class="o">=</span> <span class="n">BasicProfiler</span><span class="p">,</span>
<span class="n">udf_profiler_cls</span><span class="p">:</span> <span class="n">Type</span><span class="p">[</span><span class="n">UDFBasicProfiler</span><span class="p">]</span> <span class="o">=</span> <span class="n">UDFBasicProfiler</span><span class="p">,</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">environment</span> <span class="o">=</span> <span class="n">environment</span> <span class="ow">or</span> <span class="p">{}</span>
<span class="c1"># java gateway must have been launched at this point.</span>
<span class="k">if</span> <span class="n">conf</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="ow">and</span> <span class="n">conf</span><span class="o">.</span><span class="n">_jconf</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="c1"># conf has been initialized in JVM properly, so use conf directly. This represents the</span>
<span class="c1"># scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is</span>
<span class="c1"># created and then stopped, and we create a new SparkConf and new SparkContext again)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_conf</span> <span class="o">=</span> <span class="n">conf</span>
<span class="k">else</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_conf</span> <span class="o">=</span> <span class="n">SparkConf</span><span class="p">(</span><span class="n">_jvm</span><span class="o">=</span><span class="n">SparkContext</span><span class="o">.</span><span class="n">_jvm</span><span class="p">)</span>
<span class="k">if</span> <span class="n">conf</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">for</span> <span class="n">k</span><span class="p">,</span> <span class="n">v</span> <span class="ow">in</span> <span class="n">conf</span><span class="o">.</span><span class="n">getAll</span><span class="p">():</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_conf</span><span class="o">.</span><span class="n">set</span><span class="p">(</span><span class="n">k</span><span class="p">,</span> <span class="n">v</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_batchSize</span> <span class="o">=</span> <span class="n">batchSize</span> <span class="c1"># -1 represents an unlimited batch size</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_unbatched_serializer</span> <span class="o">=</span> <span class="n">serializer</span>
<span class="k">if</span> <span class="n">batchSize</span> <span class="o">==</span> <span class="mi">0</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">serializer</span> <span class="o">=</span> <span class="n">AutoBatchedSerializer</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_unbatched_serializer</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">serializer</span> <span class="o">=</span> <span class="n">BatchedSerializer</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_unbatched_serializer</span><span class="p">,</span> <span class="n">batchSize</span><span class="p">)</span>
<span class="c1"># Set any parameters passed directly to us on the conf</span>
<span class="k">if</span> <span class="n">master</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_conf</span><span class="o">.</span><span class="n">setMaster</span><span class="p">(</span><span class="n">master</span><span class="p">)</span>
<span class="k">if</span> <span class="n">appName</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_conf</span><span class="o">.</span><span class="n">setAppName</span><span class="p">(</span><span class="n">appName</span><span class="p">)</span>
<span class="k">if</span> <span class="n">sparkHome</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_conf</span><span class="o">.</span><span class="n">setSparkHome</span><span class="p">(</span><span class="n">sparkHome</span><span class="p">)</span>
<span class="k">if</span> <span class="n">environment</span><span class="p">:</span>
<span class="k">for</span> <span class="n">key</span><span class="p">,</span> <span class="n">value</span> <span class="ow">in</span> <span class="n">environment</span><span class="o">.</span><span class="n">items</span><span class="p">():</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_conf</span><span class="o">.</span><span class="n">setExecutorEnv</span><span class="p">(</span><span class="n">key</span><span class="p">,</span> <span class="n">value</span><span class="p">)</span>
<span class="k">for</span> <span class="n">key</span><span class="p">,</span> <span class="n">value</span> <span class="ow">in</span> <span class="n">DEFAULT_CONFIGS</span><span class="o">.</span><span class="n">items</span><span class="p">():</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_conf</span><span class="o">.</span><span class="n">setIfMissing</span><span class="p">(</span><span class="n">key</span><span class="p">,</span> <span class="n">value</span><span class="p">)</span>
<span class="c1"># Check that we have at least the required parameters</span>
<span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">_conf</span><span class="o">.</span><span class="n">contains</span><span class="p">(</span><span class="s2">&quot;spark.master&quot;</span><span class="p">):</span>
<span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span><span class="s2">&quot;A master URL must be set in your configuration&quot;</span><span class="p">)</span>
<span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">_conf</span><span class="o">.</span><span class="n">contains</span><span class="p">(</span><span class="s2">&quot;spark.app.name&quot;</span><span class="p">):</span>
<span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span><span class="s2">&quot;An application name must be set in your configuration&quot;</span><span class="p">)</span>
<span class="c1"># Read back our properties from the conf in case we loaded some of them from</span>
<span class="c1"># the classpath or an external config file</span>
<span class="bp">self</span><span class="o">.</span><span class="n">master</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_conf</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s2">&quot;spark.master&quot;</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">appName</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_conf</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s2">&quot;spark.app.name&quot;</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">sparkHome</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_conf</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s2">&quot;spark.home&quot;</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span>
<span class="k">for</span> <span class="p">(</span><span class="n">k</span><span class="p">,</span> <span class="n">v</span><span class="p">)</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">_conf</span><span class="o">.</span><span class="n">getAll</span><span class="p">():</span>
<span class="k">if</span> <span class="n">k</span><span class="o">.</span><span class="n">startswith</span><span class="p">(</span><span class="s2">&quot;spark.executorEnv.&quot;</span><span class="p">):</span>
<span class="n">varName</span> <span class="o">=</span> <span class="n">k</span><span class="p">[</span><span class="nb">len</span><span class="p">(</span><span class="s2">&quot;spark.executorEnv.&quot;</span><span class="p">)</span> <span class="p">:]</span>
<span class="bp">self</span><span class="o">.</span><span class="n">environment</span><span class="p">[</span><span class="n">varName</span><span class="p">]</span> <span class="o">=</span> <span class="n">v</span>
<span class="bp">self</span><span class="o">.</span><span class="n">environment</span><span class="p">[</span><span class="s2">&quot;PYTHONHASHSEED&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s2">&quot;PYTHONHASHSEED&quot;</span><span class="p">,</span> <span class="s2">&quot;0&quot;</span><span class="p">)</span>
<span class="c1"># Create the Java SparkContext through Py4J</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span> <span class="o">=</span> <span class="n">jsc</span> <span class="ow">or</span> <span class="bp">self</span><span class="o">.</span><span class="n">_initialize_context</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_conf</span><span class="o">.</span><span class="n">_jconf</span><span class="p">)</span>
<span class="c1"># Reset the SparkConf to the one actually used by the SparkContext in JVM.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_conf</span> <span class="o">=</span> <span class="n">SparkConf</span><span class="p">(</span><span class="n">_jconf</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">sc</span><span class="p">()</span><span class="o">.</span><span class="n">conf</span><span class="p">())</span>
<span class="c1"># Create a single Accumulator in Java that we&#39;ll send all our updates through;</span>
<span class="c1"># they will be passed back to us through a TCP server</span>
<span class="k">assert</span> <span class="bp">self</span><span class="o">.</span><span class="n">_gateway</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="n">auth_token</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_gateway</span><span class="o">.</span><span class="n">gateway_parameters</span><span class="o">.</span><span class="n">auth_token</span>
<span class="n">start_update_server</span> <span class="o">=</span> <span class="n">accumulators</span><span class="o">.</span><span class="n">_start_update_server</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_accumulatorServer</span> <span class="o">=</span> <span class="n">start_update_server</span><span class="p">(</span><span class="n">auth_token</span><span class="p">)</span>
<span class="p">(</span><span class="n">host</span><span class="p">,</span> <span class="n">port</span><span class="p">)</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_accumulatorServer</span><span class="o">.</span><span class="n">server_address</span>
<span class="k">assert</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_javaAccumulator</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">PythonAccumulatorV2</span><span class="p">(</span><span class="n">host</span><span class="p">,</span> <span class="n">port</span><span class="p">,</span> <span class="n">auth_token</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">sc</span><span class="p">()</span><span class="o">.</span><span class="n">register</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_javaAccumulator</span><span class="p">)</span>
<span class="c1"># If encryption is enabled, we need to setup a server in the jvm to read broadcast</span>
<span class="c1"># data via a socket.</span>
<span class="c1"># scala&#39;s mangled names w/ $ in them require special treatment.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_encryption_enabled</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">PythonUtils</span><span class="o">.</span><span class="n">isEncryptionEnabled</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="p">)</span>
<span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="p">[</span><span class="s2">&quot;SPARK_AUTH_SOCKET_TIMEOUT&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="nb">str</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">PythonUtils</span><span class="o">.</span><span class="n">getPythonAuthSocketTimeout</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="p">)</span>
<span class="p">)</span>
<span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="p">[</span><span class="s2">&quot;SPARK_BUFFER_SIZE&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="nb">str</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">PythonUtils</span><span class="o">.</span><span class="n">getSparkBufferSize</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="p">))</span>
<span class="bp">self</span><span class="o">.</span><span class="n">pythonExec</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">environ</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s2">&quot;PYSPARK_PYTHON&quot;</span><span class="p">,</span> <span class="s2">&quot;python3&quot;</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">pythonVer</span> <span class="o">=</span> <span class="s2">&quot;</span><span class="si">%d</span><span class="s2">.</span><span class="si">%d</span><span class="s2">&quot;</span> <span class="o">%</span> <span class="n">sys</span><span class="o">.</span><span class="n">version_info</span><span class="p">[:</span><span class="mi">2</span><span class="p">]</span>
<span class="c1"># Broadcast&#39;s __reduce__ method stores Broadcast instances here.</span>
<span class="c1"># This allows other code to determine which Broadcast instances have</span>
<span class="c1"># been pickled, so it can determine which Java broadcast objects to</span>
<span class="c1"># send.</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_pickled_broadcast_vars</span> <span class="o">=</span> <span class="n">BroadcastPickleRegistry</span><span class="p">()</span>
<span class="n">SparkFiles</span><span class="o">.</span><span class="n">_sc</span> <span class="o">=</span> <span class="bp">self</span>
<span class="n">root_dir</span> <span class="o">=</span> <span class="n">SparkFiles</span><span class="o">.</span><span class="n">getRootDirectory</span><span class="p">()</span>
<span class="n">sys</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">insert</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="n">root_dir</span><span class="p">)</span>
<span class="c1"># Deploy any code dependencies specified in the constructor</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_python_includes</span> <span class="o">=</span> <span class="nb">list</span><span class="p">()</span>
<span class="k">for</span> <span class="n">path</span> <span class="ow">in</span> <span class="n">pyFiles</span> <span class="ow">or</span> <span class="p">[]:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">addPyFile</span><span class="p">(</span><span class="n">path</span><span class="p">)</span>
<span class="c1"># Deploy code dependencies set by spark-submit; these will already have been added</span>
<span class="c1"># with SparkContext.addFile, so we just need to add them to the PYTHONPATH</span>
<span class="k">for</span> <span class="n">path</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">_conf</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s2">&quot;spark.submit.pyFiles&quot;</span><span class="p">,</span> <span class="s2">&quot;&quot;</span><span class="p">)</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="s2">&quot;,&quot;</span><span class="p">):</span>
<span class="k">if</span> <span class="n">path</span> <span class="o">!=</span> <span class="s2">&quot;&quot;</span><span class="p">:</span>
<span class="p">(</span><span class="n">dirname</span><span class="p">,</span> <span class="n">filename</span><span class="p">)</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="n">path</span><span class="p">)</span>
<span class="k">try</span><span class="p">:</span>
<span class="n">filepath</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">SparkFiles</span><span class="o">.</span><span class="n">getRootDirectory</span><span class="p">(),</span> <span class="n">filename</span><span class="p">)</span>
<span class="k">if</span> <span class="ow">not</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">exists</span><span class="p">(</span><span class="n">filepath</span><span class="p">):</span>
<span class="c1"># In case of YARN with shell mode, &#39;spark.submit.pyFiles&#39; files are</span>
<span class="c1"># not added via SparkContext.addFile. Here we check if the file exists,</span>
<span class="c1"># try to copy and then add it to the path. See SPARK-21945.</span>
<span class="n">shutil</span><span class="o">.</span><span class="n">copyfile</span><span class="p">(</span><span class="n">path</span><span class="p">,</span> <span class="n">filepath</span><span class="p">)</span>
<span class="k">if</span> <span class="n">filename</span><span class="p">[</span><span class="o">-</span><span class="mi">4</span><span class="p">:]</span><span class="o">.</span><span class="n">lower</span><span class="p">()</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">PACKAGE_EXTENSIONS</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_python_includes</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">filename</span><span class="p">)</span>
<span class="n">sys</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">insert</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="n">filepath</span><span class="p">)</span>
<span class="k">except</span> <span class="ne">Exception</span><span class="p">:</span>
<span class="n">warnings</span><span class="o">.</span><span class="n">warn</span><span class="p">(</span>
<span class="s2">&quot;Failed to add file [</span><span class="si">%s</span><span class="s2">] specified in &#39;spark.submit.pyFiles&#39; to &quot;</span>
<span class="s2">&quot;Python path:</span><span class="se">\n</span><span class="s2"> </span><span class="si">%s</span><span class="s2">&quot;</span> <span class="o">%</span> <span class="p">(</span><span class="n">path</span><span class="p">,</span> <span class="s2">&quot;</span><span class="se">\n</span><span class="s2"> &quot;</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">sys</span><span class="o">.</span><span class="n">path</span><span class="p">)),</span>
<span class="ne">RuntimeWarning</span><span class="p">,</span>
<span class="p">)</span>
<span class="c1"># Create a temporary directory inside spark.local.dir:</span>
<span class="k">assert</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="n">local_dir</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">org</span><span class="o">.</span><span class="n">apache</span><span class="o">.</span><span class="n">spark</span><span class="o">.</span><span class="n">util</span><span class="o">.</span><span class="n">Utils</span><span class="o">.</span><span class="n">getLocalDir</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">sc</span><span class="p">()</span><span class="o">.</span><span class="n">conf</span><span class="p">())</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_temp_dir</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">org</span><span class="o">.</span><span class="n">apache</span><span class="o">.</span><span class="n">spark</span><span class="o">.</span><span class="n">util</span><span class="o">.</span><span class="n">Utils</span><span class="o">.</span><span class="n">createTempDir</span><span class="p">(</span>
<span class="n">local_dir</span><span class="p">,</span> <span class="s2">&quot;pyspark&quot;</span>
<span class="p">)</span><span class="o">.</span><span class="n">getAbsolutePath</span><span class="p">()</span>
<span class="c1"># profiling stats collected for each PythonRDD</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">_conf</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s2">&quot;spark.python.profile&quot;</span><span class="p">,</span> <span class="s2">&quot;false&quot;</span><span class="p">)</span> <span class="o">==</span> <span class="s2">&quot;true&quot;</span><span class="p">:</span>
<span class="n">dump_path</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_conf</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s2">&quot;spark.python.profile.dump&quot;</span><span class="p">,</span> <span class="kc">None</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">profiler_collector</span> <span class="o">=</span> <span class="n">ProfilerCollector</span><span class="p">(</span><span class="n">profiler_cls</span><span class="p">,</span> <span class="n">udf_profiler_cls</span><span class="p">,</span> <span class="n">dump_path</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">profiler_collector</span> <span class="o">=</span> <span class="kc">None</span> <span class="c1"># type: ignore[assignment]</span>
<span class="c1"># create a signal handler which would be invoked on receiving SIGINT</span>
<span class="k">def</span> <span class="nf">signal_handler</span><span class="p">(</span><span class="n">signal</span><span class="p">:</span> <span class="n">Any</span><span class="p">,</span> <span class="n">frame</span><span class="p">:</span> <span class="n">Any</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">NoReturn</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">cancelAllJobs</span><span class="p">()</span>
<span class="k">raise</span> <span class="ne">KeyboardInterrupt</span><span class="p">()</span>
<span class="c1"># see http://stackoverflow.com/questions/23206787/</span>
<span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span>
<span class="n">threading</span><span class="o">.</span><span class="n">current_thread</span><span class="p">(),</span> <span class="n">threading</span><span class="o">.</span><span class="n">_MainThread</span> <span class="c1"># type: ignore[attr-defined]</span>
<span class="p">):</span>
<span class="n">signal</span><span class="o">.</span><span class="n">signal</span><span class="p">(</span><span class="n">signal</span><span class="o">.</span><span class="n">SIGINT</span><span class="p">,</span> <span class="n">signal_handler</span><span class="p">)</span>
<span class="k">def</span> <span class="fm">__repr__</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="nb">str</span><span class="p">:</span>
<span class="k">return</span> <span class="s2">&quot;&lt;SparkContext master=</span><span class="si">{master}</span><span class="s2"> appName=</span><span class="si">{appName}</span><span class="s2">&gt;&quot;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span>
<span class="n">master</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">master</span><span class="p">,</span>
<span class="n">appName</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">appName</span><span class="p">,</span>
<span class="p">)</span>
<span class="k">def</span> <span class="nf">_repr_html_</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="nb">str</span><span class="p">:</span>
<span class="k">return</span> <span class="s2">&quot;&quot;&quot;</span>
<span class="s2"> &lt;div&gt;</span>
<span class="s2"> &lt;p&gt;&lt;b&gt;SparkContext&lt;/b&gt;&lt;/p&gt;</span>
<span class="s2"> &lt;p&gt;&lt;a href=&quot;</span><span class="si">{sc.uiWebUrl}</span><span class="s2">&quot;&gt;Spark UI&lt;/a&gt;&lt;/p&gt;</span>
<span class="s2"> &lt;dl&gt;</span>
<span class="s2"> &lt;dt&gt;Version&lt;/dt&gt;</span>
<span class="s2"> &lt;dd&gt;&lt;code&gt;v</span><span class="si">{sc.version}</span><span class="s2">&lt;/code&gt;&lt;/dd&gt;</span>
<span class="s2"> &lt;dt&gt;Master&lt;/dt&gt;</span>
<span class="s2"> &lt;dd&gt;&lt;code&gt;</span><span class="si">{sc.master}</span><span class="s2">&lt;/code&gt;&lt;/dd&gt;</span>
<span class="s2"> &lt;dt&gt;AppName&lt;/dt&gt;</span>
<span class="s2"> &lt;dd&gt;&lt;code&gt;</span><span class="si">{sc.appName}</span><span class="s2">&lt;/code&gt;&lt;/dd&gt;</span>
<span class="s2"> &lt;/dl&gt;</span>
<span class="s2"> &lt;/div&gt;</span>
<span class="s2"> &quot;&quot;&quot;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span>
<span class="n">sc</span><span class="o">=</span><span class="bp">self</span>
<span class="p">)</span>
<span class="k">def</span> <span class="nf">_initialize_context</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">jconf</span><span class="p">:</span> <span class="n">JavaObject</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">JavaObject</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Initialize SparkContext in function to allow subclass specific initialization</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">assert</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">JavaSparkContext</span><span class="p">(</span><span class="n">jconf</span><span class="p">)</span>
<span class="nd">@classmethod</span>
<span class="k">def</span> <span class="nf">_ensure_initialized</span><span class="p">(</span>
<span class="bp">cls</span><span class="p">,</span>
<span class="n">instance</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="s2">&quot;SparkContext&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">gateway</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">JavaGateway</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">conf</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">SparkConf</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Checks whether a SparkContext is initialized or not.</span>
<span class="sd"> Throws error if a SparkContext is already running.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">with</span> <span class="n">SparkContext</span><span class="o">.</span><span class="n">_lock</span><span class="p">:</span>
<span class="k">if</span> <span class="ow">not</span> <span class="n">SparkContext</span><span class="o">.</span><span class="n">_gateway</span><span class="p">:</span>
<span class="n">SparkContext</span><span class="o">.</span><span class="n">_gateway</span> <span class="o">=</span> <span class="n">gateway</span> <span class="ow">or</span> <span class="n">launch_gateway</span><span class="p">(</span><span class="n">conf</span><span class="p">)</span>
<span class="n">SparkContext</span><span class="o">.</span><span class="n">_jvm</span> <span class="o">=</span> <span class="n">SparkContext</span><span class="o">.</span><span class="n">_gateway</span><span class="o">.</span><span class="n">jvm</span>
<span class="k">if</span> <span class="n">instance</span><span class="p">:</span>
<span class="k">if</span> <span class="p">(</span>
<span class="n">SparkContext</span><span class="o">.</span><span class="n">_active_spark_context</span>
<span class="ow">and</span> <span class="n">SparkContext</span><span class="o">.</span><span class="n">_active_spark_context</span> <span class="o">!=</span> <span class="n">instance</span>
<span class="p">):</span>
<span class="n">currentMaster</span> <span class="o">=</span> <span class="n">SparkContext</span><span class="o">.</span><span class="n">_active_spark_context</span><span class="o">.</span><span class="n">master</span>
<span class="n">currentAppName</span> <span class="o">=</span> <span class="n">SparkContext</span><span class="o">.</span><span class="n">_active_spark_context</span><span class="o">.</span><span class="n">appName</span>
<span class="n">callsite</span> <span class="o">=</span> <span class="n">SparkContext</span><span class="o">.</span><span class="n">_active_spark_context</span><span class="o">.</span><span class="n">_callsite</span>
<span class="c1"># Raise error if there is already a running Spark context</span>
<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
<span class="s2">&quot;Cannot run multiple SparkContexts at once; &quot;</span>
<span class="s2">&quot;existing SparkContext(app=</span><span class="si">%s</span><span class="s2">, master=</span><span class="si">%s</span><span class="s2">)&quot;</span>
<span class="s2">&quot; created by </span><span class="si">%s</span><span class="s2"> at </span><span class="si">%s</span><span class="s2">:</span><span class="si">%s</span><span class="s2"> &quot;</span>
<span class="o">%</span> <span class="p">(</span>
<span class="n">currentAppName</span><span class="p">,</span>
<span class="n">currentMaster</span><span class="p">,</span>
<span class="n">callsite</span><span class="o">.</span><span class="n">function</span><span class="p">,</span>
<span class="n">callsite</span><span class="o">.</span><span class="n">file</span><span class="p">,</span>
<span class="n">callsite</span><span class="o">.</span><span class="n">linenum</span><span class="p">,</span>
<span class="p">)</span>
<span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">SparkContext</span><span class="o">.</span><span class="n">_active_spark_context</span> <span class="o">=</span> <span class="n">instance</span>
<span class="k">def</span> <span class="nf">__getnewargs__</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">NoReturn</span><span class="p">:</span>
<span class="c1"># This method is called when attempting to pickle SparkContext, which is always an error:</span>
<span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span>
<span class="s2">&quot;It appears that you are attempting to reference SparkContext from a broadcast &quot;</span>
<span class="s2">&quot;variable, action, or transformation. SparkContext can only be used on the driver, &quot;</span>
<span class="s2">&quot;not in code that it run on workers. For more information, see SPARK-5063.&quot;</span>
<span class="p">)</span>
<span class="k">def</span> <span class="fm">__enter__</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="s2">&quot;SparkContext&quot;</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Enable &#39;with SparkContext(...) as sc: app(sc)&#39; syntax.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">return</span> <span class="bp">self</span>
<span class="k">def</span> <span class="fm">__exit__</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span>
<span class="nb">type</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Type</span><span class="p">[</span><span class="ne">BaseException</span><span class="p">]],</span>
<span class="n">value</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="ne">BaseException</span><span class="p">],</span>
<span class="n">trace</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">TracebackType</span><span class="p">],</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Enable &#39;with SparkContext(...) as sc: app&#39; syntax.</span>
<span class="sd"> Specifically stop the context on exit of the with block.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="bp">self</span><span class="o">.</span><span class="n">stop</span><span class="p">()</span>
<div class="viewcode-block" id="SparkContext.getOrCreate"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.getOrCreate.html#pyspark.SparkContext.getOrCreate">[docs]</a> <span class="nd">@classmethod</span>
<span class="k">def</span> <span class="nf">getOrCreate</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">conf</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">SparkConf</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="s2">&quot;SparkContext&quot;</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Get or instantiate a SparkContext and register it as a singleton object.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> conf : :py:class:`pyspark.SparkConf`, optional</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">with</span> <span class="n">SparkContext</span><span class="o">.</span><span class="n">_lock</span><span class="p">:</span>
<span class="k">if</span> <span class="n">SparkContext</span><span class="o">.</span><span class="n">_active_spark_context</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">SparkContext</span><span class="p">(</span><span class="n">conf</span><span class="o">=</span><span class="n">conf</span> <span class="ow">or</span> <span class="n">SparkConf</span><span class="p">())</span>
<span class="k">assert</span> <span class="n">SparkContext</span><span class="o">.</span><span class="n">_active_spark_context</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="k">return</span> <span class="n">SparkContext</span><span class="o">.</span><span class="n">_active_spark_context</span></div>
<div class="viewcode-block" id="SparkContext.setLogLevel"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.setLogLevel.html#pyspark.SparkContext.setLogLevel">[docs]</a> <span class="k">def</span> <span class="nf">setLogLevel</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">logLevel</span><span class="p">:</span> <span class="nb">str</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Control our logLevel. This overrides any user-defined log settings.</span>
<span class="sd"> Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">setLogLevel</span><span class="p">(</span><span class="n">logLevel</span><span class="p">)</span></div>
<div class="viewcode-block" id="SparkContext.setSystemProperty"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.setSystemProperty.html#pyspark.SparkContext.setSystemProperty">[docs]</a> <span class="nd">@classmethod</span>
<span class="k">def</span> <span class="nf">setSystemProperty</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">key</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span> <span class="n">value</span><span class="p">:</span> <span class="nb">str</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Set a Java system property, such as spark.executor.memory. This must</span>
<span class="sd"> must be invoked before instantiating SparkContext.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">SparkContext</span><span class="o">.</span><span class="n">_ensure_initialized</span><span class="p">()</span>
<span class="k">assert</span> <span class="n">SparkContext</span><span class="o">.</span><span class="n">_jvm</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="n">SparkContext</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">java</span><span class="o">.</span><span class="n">lang</span><span class="o">.</span><span class="n">System</span><span class="o">.</span><span class="n">setProperty</span><span class="p">(</span><span class="n">key</span><span class="p">,</span> <span class="n">value</span><span class="p">)</span></div>
<span class="nd">@property</span>
<span class="k">def</span> <span class="nf">version</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="nb">str</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> The version of Spark on which this application is running.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">version</span><span class="p">()</span>
<span class="nd">@property</span>
<span class="k">def</span> <span class="nf">applicationId</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="nb">str</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> A unique identifier for the Spark application.</span>
<span class="sd"> Its format depends on the scheduler implementation.</span>
<span class="sd"> * in case of local spark app something like &#39;local-1433865536131&#39;</span>
<span class="sd"> * in case of YARN something like &#39;application_1433865536131_34483&#39;</span>
<span class="sd"> Examples</span>
<span class="sd"> --------</span>
<span class="sd"> &gt;&gt;&gt; sc.applicationId # doctest: +ELLIPSIS</span>
<span class="sd"> &#39;local-...&#39;</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">sc</span><span class="p">()</span><span class="o">.</span><span class="n">applicationId</span><span class="p">()</span>
<span class="nd">@property</span>
<span class="k">def</span> <span class="nf">uiWebUrl</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="nb">str</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;Return the URL of the SparkUI instance started by this SparkContext&quot;&quot;&quot;</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">sc</span><span class="p">()</span><span class="o">.</span><span class="n">uiWebUrl</span><span class="p">()</span><span class="o">.</span><span class="n">get</span><span class="p">()</span>
<span class="nd">@property</span>
<span class="k">def</span> <span class="nf">startTime</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="nb">int</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;Return the epoch time when the Spark Context was started.&quot;&quot;&quot;</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">startTime</span><span class="p">()</span>
<span class="nd">@property</span>
<span class="k">def</span> <span class="nf">defaultParallelism</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="nb">int</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Default level of parallelism to use when not given by user (e.g. for</span>
<span class="sd"> reduce tasks)</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">sc</span><span class="p">()</span><span class="o">.</span><span class="n">defaultParallelism</span><span class="p">()</span>
<span class="nd">@property</span>
<span class="k">def</span> <span class="nf">defaultMinPartitions</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="nb">int</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Default min number of partitions for Hadoop RDDs when not given by user</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">sc</span><span class="p">()</span><span class="o">.</span><span class="n">defaultMinPartitions</span><span class="p">()</span>
<div class="viewcode-block" id="SparkContext.stop"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.stop.html#pyspark.SparkContext.stop">[docs]</a> <span class="k">def</span> <span class="nf">stop</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Shut down the SparkContext.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">if</span> <span class="nb">getattr</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="s2">&quot;_jsc&quot;</span><span class="p">,</span> <span class="kc">None</span><span class="p">):</span>
<span class="k">try</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">stop</span><span class="p">()</span>
<span class="k">except</span> <span class="n">Py4JError</span><span class="p">:</span>
<span class="c1"># Case: SPARK-18523</span>
<span class="n">warnings</span><span class="o">.</span><span class="n">warn</span><span class="p">(</span>
<span class="s2">&quot;Unable to cleanly shutdown Spark JVM process.&quot;</span>
<span class="s2">&quot; It is possible that the process has crashed,&quot;</span>
<span class="s2">&quot; been killed or may also be in a zombie state.&quot;</span><span class="p">,</span>
<span class="ne">RuntimeWarning</span><span class="p">,</span>
<span class="p">)</span>
<span class="k">finally</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span> <span class="o">=</span> <span class="kc">None</span>
<span class="k">if</span> <span class="nb">getattr</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="s2">&quot;_accumulatorServer&quot;</span><span class="p">,</span> <span class="kc">None</span><span class="p">):</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_accumulatorServer</span><span class="o">.</span><span class="n">shutdown</span><span class="p">()</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_accumulatorServer</span> <span class="o">=</span> <span class="kc">None</span> <span class="c1"># type: ignore[assignment]</span>
<span class="k">with</span> <span class="n">SparkContext</span><span class="o">.</span><span class="n">_lock</span><span class="p">:</span>
<span class="n">SparkContext</span><span class="o">.</span><span class="n">_active_spark_context</span> <span class="o">=</span> <span class="kc">None</span></div>
<div class="viewcode-block" id="SparkContext.emptyRDD"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.emptyRDD.html#pyspark.SparkContext.emptyRDD">[docs]</a> <span class="k">def</span> <span class="nf">emptyRDD</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">RDD</span><span class="p">[</span><span class="n">Any</span><span class="p">]:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Create an RDD that has no partitions or elements.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">return</span> <span class="n">RDD</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">emptyRDD</span><span class="p">(),</span> <span class="bp">self</span><span class="p">,</span> <span class="n">NoOpSerializer</span><span class="p">())</span></div>
<div class="viewcode-block" id="SparkContext.range"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.range.html#pyspark.SparkContext.range">[docs]</a> <span class="k">def</span> <span class="nf">range</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span> <span class="n">start</span><span class="p">:</span> <span class="nb">int</span><span class="p">,</span> <span class="n">end</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span> <span class="n">step</span><span class="p">:</span> <span class="nb">int</span> <span class="o">=</span> <span class="mi">1</span><span class="p">,</span> <span class="n">numSlices</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="n">RDD</span><span class="p">[</span><span class="nb">int</span><span class="p">]:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Create a new RDD of int containing elements from `start` to `end`</span>
<span class="sd"> (exclusive), increased by `step` every element. Can be called the same</span>
<span class="sd"> way as python&#39;s built-in range() function. If called with a single argument,</span>
<span class="sd"> the argument is interpreted as `end`, and `start` is set to 0.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> start : int</span>
<span class="sd"> the start value</span>
<span class="sd"> end : int, optional</span>
<span class="sd"> the end value (exclusive)</span>
<span class="sd"> step : int, optional</span>
<span class="sd"> the incremental step (default: 1)</span>
<span class="sd"> numSlices : int, optional</span>
<span class="sd"> the number of partitions of the new RDD</span>
<span class="sd"> Returns</span>
<span class="sd"> -------</span>
<span class="sd"> :py:class:`pyspark.RDD`</span>
<span class="sd"> An RDD of int</span>
<span class="sd"> Examples</span>
<span class="sd"> --------</span>
<span class="sd"> &gt;&gt;&gt; sc.range(5).collect()</span>
<span class="sd"> [0, 1, 2, 3, 4]</span>
<span class="sd"> &gt;&gt;&gt; sc.range(2, 4).collect()</span>
<span class="sd"> [2, 3]</span>
<span class="sd"> &gt;&gt;&gt; sc.range(1, 7, 2).collect()</span>
<span class="sd"> [1, 3, 5]</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">if</span> <span class="n">end</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">end</span> <span class="o">=</span> <span class="n">start</span>
<span class="n">start</span> <span class="o">=</span> <span class="mi">0</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">parallelize</span><span class="p">(</span><span class="nb">range</span><span class="p">(</span><span class="n">start</span><span class="p">,</span> <span class="n">end</span><span class="p">,</span> <span class="n">step</span><span class="p">),</span> <span class="n">numSlices</span><span class="p">)</span></div>
<div class="viewcode-block" id="SparkContext.parallelize"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.parallelize.html#pyspark.SparkContext.parallelize">[docs]</a> <span class="k">def</span> <span class="nf">parallelize</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">c</span><span class="p">:</span> <span class="n">Iterable</span><span class="p">[</span><span class="n">T</span><span class="p">],</span> <span class="n">numSlices</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">RDD</span><span class="p">[</span><span class="n">T</span><span class="p">]:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Distribute a local Python collection to form an RDD. Using range</span>
<span class="sd"> is recommended if the input represents a range for performance.</span>
<span class="sd"> Examples</span>
<span class="sd"> --------</span>
<span class="sd"> &gt;&gt;&gt; sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()</span>
<span class="sd"> [[0], [2], [3], [4], [6]]</span>
<span class="sd"> &gt;&gt;&gt; sc.parallelize(range(0, 6, 2), 5).glom().collect()</span>
<span class="sd"> [[], [0], [], [2], [4]]</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">numSlices</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">numSlices</span><span class="p">)</span> <span class="k">if</span> <span class="n">numSlices</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="k">else</span> <span class="bp">self</span><span class="o">.</span><span class="n">defaultParallelism</span>
<span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">c</span><span class="p">,</span> <span class="nb">range</span><span class="p">):</span>
<span class="n">size</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">c</span><span class="p">)</span>
<span class="k">if</span> <span class="n">size</span> <span class="o">==</span> <span class="mi">0</span><span class="p">:</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">parallelize</span><span class="p">([],</span> <span class="n">numSlices</span><span class="p">)</span>
<span class="n">step</span> <span class="o">=</span> <span class="n">c</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">-</span> <span class="n">c</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="k">if</span> <span class="n">size</span> <span class="o">&gt;</span> <span class="mi">1</span> <span class="k">else</span> <span class="mi">1</span> <span class="c1"># type: ignore[index]</span>
<span class="n">start0</span> <span class="o">=</span> <span class="n">c</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="c1"># type: ignore[index]</span>
<span class="k">def</span> <span class="nf">getStart</span><span class="p">(</span><span class="n">split</span><span class="p">:</span> <span class="nb">int</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="nb">int</span><span class="p">:</span>
<span class="k">assert</span> <span class="n">numSlices</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="k">return</span> <span class="n">start0</span> <span class="o">+</span> <span class="nb">int</span><span class="p">((</span><span class="n">split</span> <span class="o">*</span> <span class="n">size</span> <span class="o">/</span> <span class="n">numSlices</span><span class="p">))</span> <span class="o">*</span> <span class="n">step</span>
<span class="k">def</span> <span class="nf">f</span><span class="p">(</span><span class="n">split</span><span class="p">:</span> <span class="nb">int</span><span class="p">,</span> <span class="n">iterator</span><span class="p">:</span> <span class="n">Iterable</span><span class="p">[</span><span class="n">T</span><span class="p">])</span> <span class="o">-&gt;</span> <span class="n">Iterable</span><span class="p">:</span>
<span class="c1"># it&#39;s an empty iterator here but we need this line for triggering the</span>
<span class="c1"># logic of signal handling in FramedSerializer.load_stream, for instance,</span>
<span class="c1"># SpecialLengths.END_OF_DATA_SECTION in _read_with_length. Since</span>
<span class="c1"># FramedSerializer.load_stream produces a generator, the control should</span>
<span class="c1"># at least be in that function once. Here we do it by explicitly converting</span>
<span class="c1"># the empty iterator to a list, thus make sure worker reuse takes effect.</span>
<span class="c1"># See more details in SPARK-26549.</span>
<span class="k">assert</span> <span class="nb">len</span><span class="p">(</span><span class="nb">list</span><span class="p">(</span><span class="n">iterator</span><span class="p">))</span> <span class="o">==</span> <span class="mi">0</span>
<span class="k">return</span> <span class="nb">range</span><span class="p">(</span><span class="n">getStart</span><span class="p">(</span><span class="n">split</span><span class="p">),</span> <span class="n">getStart</span><span class="p">(</span><span class="n">split</span> <span class="o">+</span> <span class="mi">1</span><span class="p">),</span> <span class="n">step</span><span class="p">)</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">parallelize</span><span class="p">([],</span> <span class="n">numSlices</span><span class="p">)</span><span class="o">.</span><span class="n">mapPartitionsWithIndex</span><span class="p">(</span><span class="n">f</span><span class="p">)</span>
<span class="c1"># Make sure we distribute data evenly if it&#39;s smaller than self.batchSize</span>
<span class="k">if</span> <span class="s2">&quot;__len__&quot;</span> <span class="ow">not</span> <span class="ow">in</span> <span class="nb">dir</span><span class="p">(</span><span class="n">c</span><span class="p">):</span>
<span class="n">c</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="n">c</span><span class="p">)</span> <span class="c1"># Make it a list so we can compute its length</span>
<span class="n">batchSize</span> <span class="o">=</span> <span class="nb">max</span><span class="p">(</span>
<span class="mi">1</span><span class="p">,</span> <span class="nb">min</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">c</span><span class="p">)</span> <span class="o">//</span> <span class="n">numSlices</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">_batchSize</span> <span class="ow">or</span> <span class="mi">1024</span><span class="p">)</span> <span class="c1"># type: ignore[arg-type]</span>
<span class="p">)</span>
<span class="n">serializer</span> <span class="o">=</span> <span class="n">BatchedSerializer</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_unbatched_serializer</span><span class="p">,</span> <span class="n">batchSize</span><span class="p">)</span>
<span class="k">def</span> <span class="nf">reader_func</span><span class="p">(</span><span class="n">temp_filename</span><span class="p">:</span> <span class="nb">str</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">JavaObject</span><span class="p">:</span>
<span class="k">assert</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">PythonRDD</span><span class="o">.</span><span class="n">readRDDFromFile</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="p">,</span> <span class="n">temp_filename</span><span class="p">,</span> <span class="n">numSlices</span><span class="p">)</span>
<span class="k">def</span> <span class="nf">createRDDServer</span><span class="p">()</span> <span class="o">-&gt;</span> <span class="n">JavaObject</span><span class="p">:</span>
<span class="k">assert</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">PythonParallelizeServer</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">sc</span><span class="p">(),</span> <span class="n">numSlices</span><span class="p">)</span>
<span class="n">jrdd</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_serialize_to_jvm</span><span class="p">(</span><span class="n">c</span><span class="p">,</span> <span class="n">serializer</span><span class="p">,</span> <span class="n">reader_func</span><span class="p">,</span> <span class="n">createRDDServer</span><span class="p">)</span>
<span class="k">return</span> <span class="n">RDD</span><span class="p">(</span><span class="n">jrdd</span><span class="p">,</span> <span class="bp">self</span><span class="p">,</span> <span class="n">serializer</span><span class="p">)</span></div>
<span class="k">def</span> <span class="nf">_serialize_to_jvm</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span>
<span class="n">data</span><span class="p">:</span> <span class="n">Iterable</span><span class="p">[</span><span class="n">T</span><span class="p">],</span>
<span class="n">serializer</span><span class="p">:</span> <span class="n">Serializer</span><span class="p">,</span>
<span class="n">reader_func</span><span class="p">:</span> <span class="n">Callable</span><span class="p">,</span>
<span class="n">createRDDServer</span><span class="p">:</span> <span class="n">Callable</span><span class="p">,</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="n">JavaObject</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Using py4j to send a large dataset to the jvm is really slow, so we use either a file</span>
<span class="sd"> or a socket if we have encryption enabled.</span>
<span class="sd"> Examples</span>
<span class="sd"> --------</span>
<span class="sd"> data</span>
<span class="sd"> object to be serialized</span>
<span class="sd"> serializer : :py:class:`pyspark.serializers.Serializer`</span>
<span class="sd"> reader_func : function</span>
<span class="sd"> A function which takes a filename and reads in the data in the jvm and</span>
<span class="sd"> returns a JavaRDD. Only used when encryption is disabled.</span>
<span class="sd"> createRDDServer : function</span>
<span class="sd"> A function which creates a PythonRDDServer in the jvm to</span>
<span class="sd"> accept the serialized data, for use when encryption is enabled.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">_encryption_enabled</span><span class="p">:</span>
<span class="c1"># with encryption, we open a server in java and send the data directly</span>
<span class="n">server</span> <span class="o">=</span> <span class="n">createRDDServer</span><span class="p">()</span>
<span class="p">(</span><span class="n">sock_file</span><span class="p">,</span> <span class="n">_</span><span class="p">)</span> <span class="o">=</span> <span class="n">local_connect_and_auth</span><span class="p">(</span><span class="n">server</span><span class="o">.</span><span class="n">port</span><span class="p">(),</span> <span class="n">server</span><span class="o">.</span><span class="n">secret</span><span class="p">())</span>
<span class="n">chunked_out</span> <span class="o">=</span> <span class="n">ChunkedStream</span><span class="p">(</span><span class="n">sock_file</span><span class="p">,</span> <span class="mi">8192</span><span class="p">)</span>
<span class="n">serializer</span><span class="o">.</span><span class="n">dump_stream</span><span class="p">(</span><span class="n">data</span><span class="p">,</span> <span class="n">chunked_out</span><span class="p">)</span>
<span class="n">chunked_out</span><span class="o">.</span><span class="n">close</span><span class="p">()</span>
<span class="c1"># this call will block until the server has read all the data and processed it (or</span>
<span class="c1"># throws an exception)</span>
<span class="n">r</span> <span class="o">=</span> <span class="n">server</span><span class="o">.</span><span class="n">getResult</span><span class="p">()</span>
<span class="k">return</span> <span class="n">r</span>
<span class="k">else</span><span class="p">:</span>
<span class="c1"># without encryption, we serialize to a file, and we read the file in java and</span>
<span class="c1"># parallelize from there.</span>
<span class="n">tempFile</span> <span class="o">=</span> <span class="n">NamedTemporaryFile</span><span class="p">(</span><span class="n">delete</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="nb">dir</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">_temp_dir</span><span class="p">)</span>
<span class="k">try</span><span class="p">:</span>
<span class="k">try</span><span class="p">:</span>
<span class="n">serializer</span><span class="o">.</span><span class="n">dump_stream</span><span class="p">(</span><span class="n">data</span><span class="p">,</span> <span class="n">tempFile</span><span class="p">)</span>
<span class="k">finally</span><span class="p">:</span>
<span class="n">tempFile</span><span class="o">.</span><span class="n">close</span><span class="p">()</span>
<span class="k">return</span> <span class="n">reader_func</span><span class="p">(</span><span class="n">tempFile</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
<span class="k">finally</span><span class="p">:</span>
<span class="c1"># we eagerly reads the file so we can delete right after.</span>
<span class="n">os</span><span class="o">.</span><span class="n">unlink</span><span class="p">(</span><span class="n">tempFile</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
<div class="viewcode-block" id="SparkContext.pickleFile"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.pickleFile.html#pyspark.SparkContext.pickleFile">[docs]</a> <span class="k">def</span> <span class="nf">pickleFile</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">name</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span> <span class="n">minPartitions</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">RDD</span><span class="p">[</span><span class="n">Any</span><span class="p">]:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Load an RDD previously saved using :meth:`RDD.saveAsPickleFile` method.</span>
<span class="sd"> Examples</span>
<span class="sd"> --------</span>
<span class="sd"> &gt;&gt;&gt; tmpFile = NamedTemporaryFile(delete=True)</span>
<span class="sd"> &gt;&gt;&gt; tmpFile.close()</span>
<span class="sd"> &gt;&gt;&gt; sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)</span>
<span class="sd"> &gt;&gt;&gt; sorted(sc.pickleFile(tmpFile.name, 3).collect())</span>
<span class="sd"> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">minPartitions</span> <span class="o">=</span> <span class="n">minPartitions</span> <span class="ow">or</span> <span class="bp">self</span><span class="o">.</span><span class="n">defaultMinPartitions</span>
<span class="k">return</span> <span class="n">RDD</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">objectFile</span><span class="p">(</span><span class="n">name</span><span class="p">,</span> <span class="n">minPartitions</span><span class="p">),</span> <span class="bp">self</span><span class="p">)</span></div>
<div class="viewcode-block" id="SparkContext.textFile"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.textFile.html#pyspark.SparkContext.textFile">[docs]</a> <span class="k">def</span> <span class="nf">textFile</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span> <span class="n">name</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span> <span class="n">minPartitions</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span> <span class="n">use_unicode</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="kc">True</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="n">RDD</span><span class="p">[</span><span class="nb">str</span><span class="p">]:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Read a text file from HDFS, a local file system (available on all</span>
<span class="sd"> nodes), or any Hadoop-supported file system URI, and return it as an</span>
<span class="sd"> RDD of Strings.</span>
<span class="sd"> The text files must be encoded as UTF-8.</span>
<span class="sd"> If use_unicode is False, the strings will be kept as `str` (encoding</span>
<span class="sd"> as `utf-8`), which is faster and smaller than unicode. (Added in</span>
<span class="sd"> Spark 1.2)</span>
<span class="sd"> Examples</span>
<span class="sd"> --------</span>
<span class="sd"> &gt;&gt;&gt; path = os.path.join(tempdir, &quot;sample-text.txt&quot;)</span>
<span class="sd"> &gt;&gt;&gt; with open(path, &quot;w&quot;) as testFile:</span>
<span class="sd"> ... _ = testFile.write(&quot;Hello world!&quot;)</span>
<span class="sd"> &gt;&gt;&gt; textFile = sc.textFile(path)</span>
<span class="sd"> &gt;&gt;&gt; textFile.collect()</span>
<span class="sd"> [&#39;Hello world!&#39;]</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">minPartitions</span> <span class="o">=</span> <span class="n">minPartitions</span> <span class="ow">or</span> <span class="nb">min</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">defaultParallelism</span><span class="p">,</span> <span class="mi">2</span><span class="p">)</span>
<span class="k">return</span> <span class="n">RDD</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">textFile</span><span class="p">(</span><span class="n">name</span><span class="p">,</span> <span class="n">minPartitions</span><span class="p">),</span> <span class="bp">self</span><span class="p">,</span> <span class="n">UTF8Deserializer</span><span class="p">(</span><span class="n">use_unicode</span><span class="p">))</span></div>
<div class="viewcode-block" id="SparkContext.wholeTextFiles"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.wholeTextFiles.html#pyspark.SparkContext.wholeTextFiles">[docs]</a> <span class="k">def</span> <span class="nf">wholeTextFiles</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span> <span class="n">path</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span> <span class="n">minPartitions</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span> <span class="n">use_unicode</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="kc">True</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="n">RDD</span><span class="p">[</span><span class="n">Tuple</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="nb">str</span><span class="p">]]:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Read a directory of text files from HDFS, a local file system</span>
<span class="sd"> (available on all nodes), or any Hadoop-supported file system</span>
<span class="sd"> URI. Each file is read as a single record and returned in a</span>
<span class="sd"> key-value pair, where the key is the path of each file, the</span>
<span class="sd"> value is the content of each file.</span>
<span class="sd"> The text files must be encoded as UTF-8.</span>
<span class="sd"> If `use_unicode` is False, the strings will be kept as `str` (encoding</span>
<span class="sd"> as `utf-8`), which is faster and smaller than unicode. (Added in</span>
<span class="sd"> Spark 1.2)</span>
<span class="sd"> For example, if you have the following files:</span>
<span class="sd"> .. code-block:: text</span>
<span class="sd"> hdfs://a-hdfs-path/part-00000</span>
<span class="sd"> hdfs://a-hdfs-path/part-00001</span>
<span class="sd"> ...</span>
<span class="sd"> hdfs://a-hdfs-path/part-nnnnn</span>
<span class="sd"> Do ``rdd = sparkContext.wholeTextFiles(&quot;hdfs://a-hdfs-path&quot;)``,</span>
<span class="sd"> then ``rdd`` contains:</span>
<span class="sd"> .. code-block:: text</span>
<span class="sd"> (a-hdfs-path/part-00000, its content)</span>
<span class="sd"> (a-hdfs-path/part-00001, its content)</span>
<span class="sd"> ...</span>
<span class="sd"> (a-hdfs-path/part-nnnnn, its content)</span>
<span class="sd"> Notes</span>
<span class="sd"> -----</span>
<span class="sd"> Small files are preferred, as each file will be loaded fully in memory.</span>
<span class="sd"> Examples</span>
<span class="sd"> --------</span>
<span class="sd"> &gt;&gt;&gt; dirPath = os.path.join(tempdir, &quot;files&quot;)</span>
<span class="sd"> &gt;&gt;&gt; os.mkdir(dirPath)</span>
<span class="sd"> &gt;&gt;&gt; with open(os.path.join(dirPath, &quot;1.txt&quot;), &quot;w&quot;) as file1:</span>
<span class="sd"> ... _ = file1.write(&quot;1&quot;)</span>
<span class="sd"> &gt;&gt;&gt; with open(os.path.join(dirPath, &quot;2.txt&quot;), &quot;w&quot;) as file2:</span>
<span class="sd"> ... _ = file2.write(&quot;2&quot;)</span>
<span class="sd"> &gt;&gt;&gt; textFiles = sc.wholeTextFiles(dirPath)</span>
<span class="sd"> &gt;&gt;&gt; sorted(textFiles.collect())</span>
<span class="sd"> [(&#39;.../1.txt&#39;, &#39;1&#39;), (&#39;.../2.txt&#39;, &#39;2&#39;)]</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">minPartitions</span> <span class="o">=</span> <span class="n">minPartitions</span> <span class="ow">or</span> <span class="bp">self</span><span class="o">.</span><span class="n">defaultMinPartitions</span>
<span class="k">return</span> <span class="n">RDD</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">wholeTextFiles</span><span class="p">(</span><span class="n">path</span><span class="p">,</span> <span class="n">minPartitions</span><span class="p">),</span>
<span class="bp">self</span><span class="p">,</span>
<span class="n">PairDeserializer</span><span class="p">(</span><span class="n">UTF8Deserializer</span><span class="p">(</span><span class="n">use_unicode</span><span class="p">),</span> <span class="n">UTF8Deserializer</span><span class="p">(</span><span class="n">use_unicode</span><span class="p">)),</span>
<span class="p">)</span></div>
<div class="viewcode-block" id="SparkContext.binaryFiles"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.binaryFiles.html#pyspark.SparkContext.binaryFiles">[docs]</a> <span class="k">def</span> <span class="nf">binaryFiles</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">path</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span> <span class="n">minPartitions</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">RDD</span><span class="p">[</span><span class="n">Tuple</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="nb">bytes</span><span class="p">]]:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Read a directory of binary files from HDFS, a local file system</span>
<span class="sd"> (available on all nodes), or any Hadoop-supported file system URI</span>
<span class="sd"> as a byte array. Each file is read as a single record and returned</span>
<span class="sd"> in a key-value pair, where the key is the path of each file, the</span>
<span class="sd"> value is the content of each file.</span>
<span class="sd"> Notes</span>
<span class="sd"> -----</span>
<span class="sd"> Small files are preferred, large file is also allowable, but may cause bad performance.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">minPartitions</span> <span class="o">=</span> <span class="n">minPartitions</span> <span class="ow">or</span> <span class="bp">self</span><span class="o">.</span><span class="n">defaultMinPartitions</span>
<span class="k">return</span> <span class="n">RDD</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">binaryFiles</span><span class="p">(</span><span class="n">path</span><span class="p">,</span> <span class="n">minPartitions</span><span class="p">),</span>
<span class="bp">self</span><span class="p">,</span>
<span class="n">PairDeserializer</span><span class="p">(</span><span class="n">UTF8Deserializer</span><span class="p">(),</span> <span class="n">NoOpSerializer</span><span class="p">()),</span>
<span class="p">)</span></div>
<div class="viewcode-block" id="SparkContext.binaryRecords"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.binaryRecords.html#pyspark.SparkContext.binaryRecords">[docs]</a> <span class="k">def</span> <span class="nf">binaryRecords</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">path</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span> <span class="n">recordLength</span><span class="p">:</span> <span class="nb">int</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">RDD</span><span class="p">[</span><span class="nb">bytes</span><span class="p">]:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Load data from a flat binary file, assuming each record is a set of numbers</span>
<span class="sd"> with the specified numerical format (see ByteBuffer), and the number of</span>
<span class="sd"> bytes per record is constant.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> path : str</span>
<span class="sd"> Directory to the input data files</span>
<span class="sd"> recordLength : int</span>
<span class="sd"> The length at which to split the records</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">return</span> <span class="n">RDD</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">binaryRecords</span><span class="p">(</span><span class="n">path</span><span class="p">,</span> <span class="n">recordLength</span><span class="p">),</span> <span class="bp">self</span><span class="p">,</span> <span class="n">NoOpSerializer</span><span class="p">())</span></div>
<span class="k">def</span> <span class="nf">_dictToJavaMap</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">d</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="nb">str</span><span class="p">]])</span> <span class="o">-&gt;</span> <span class="n">JavaMap</span><span class="p">:</span>
<span class="k">assert</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="n">jm</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">java</span><span class="o">.</span><span class="n">util</span><span class="o">.</span><span class="n">HashMap</span><span class="p">()</span>
<span class="k">if</span> <span class="ow">not</span> <span class="n">d</span><span class="p">:</span>
<span class="n">d</span> <span class="o">=</span> <span class="p">{}</span>
<span class="k">for</span> <span class="n">k</span><span class="p">,</span> <span class="n">v</span> <span class="ow">in</span> <span class="n">d</span><span class="o">.</span><span class="n">items</span><span class="p">():</span>
<span class="n">jm</span><span class="p">[</span><span class="n">k</span><span class="p">]</span> <span class="o">=</span> <span class="n">v</span>
<span class="k">return</span> <span class="n">jm</span>
<div class="viewcode-block" id="SparkContext.sequenceFile"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.sequenceFile.html#pyspark.SparkContext.sequenceFile">[docs]</a> <span class="k">def</span> <span class="nf">sequenceFile</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span>
<span class="n">path</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
<span class="n">keyClass</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">valueClass</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">keyConverter</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">valueConverter</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">minSplits</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">batchSize</span><span class="p">:</span> <span class="nb">int</span> <span class="o">=</span> <span class="mi">0</span><span class="p">,</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="n">RDD</span><span class="p">[</span><span class="n">Tuple</span><span class="p">[</span><span class="n">T</span><span class="p">,</span> <span class="n">U</span><span class="p">]]:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,</span>
<span class="sd"> a local file system (available on all nodes), or any Hadoop-supported file system URI.</span>
<span class="sd"> The mechanism is as follows:</span>
<span class="sd"> 1. A Java RDD is created from the SequenceFile or other InputFormat, and the key</span>
<span class="sd"> and value Writable classes</span>
<span class="sd"> 2. Serialization is attempted via Pickle pickling</span>
<span class="sd"> 3. If this fails, the fallback is to call &#39;toString&#39; on each key and value</span>
<span class="sd"> 4. :class:`CPickleSerializer` is used to deserialize pickled objects on the Python side</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> path : str</span>
<span class="sd"> path to sequencefile</span>
<span class="sd"> keyClass: str, optional</span>
<span class="sd"> fully qualified classname of key Writable class (e.g. &quot;org.apache.hadoop.io.Text&quot;)</span>
<span class="sd"> valueClass : str, optional</span>
<span class="sd"> fully qualified classname of value Writable class</span>
<span class="sd"> (e.g. &quot;org.apache.hadoop.io.LongWritable&quot;)</span>
<span class="sd"> keyConverter : str, optional</span>
<span class="sd"> fully qualified name of a function returning key WritableConverter</span>
<span class="sd"> valueConverter : str, optional</span>
<span class="sd"> fully qualifiedname of a function returning value WritableConverter</span>
<span class="sd"> minSplits : int, optional</span>
<span class="sd"> minimum splits in dataset (default min(2, sc.defaultParallelism))</span>
<span class="sd"> batchSize : int, optional</span>
<span class="sd"> The number of Python objects represented as a single</span>
<span class="sd"> Java object. (default 0, choose batchSize automatically)</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">minSplits</span> <span class="o">=</span> <span class="n">minSplits</span> <span class="ow">or</span> <span class="nb">min</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">defaultParallelism</span><span class="p">,</span> <span class="mi">2</span><span class="p">)</span>
<span class="k">assert</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="n">jrdd</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">PythonRDD</span><span class="o">.</span><span class="n">sequenceFile</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="p">,</span>
<span class="n">path</span><span class="p">,</span>
<span class="n">keyClass</span><span class="p">,</span>
<span class="n">valueClass</span><span class="p">,</span>
<span class="n">keyConverter</span><span class="p">,</span>
<span class="n">valueConverter</span><span class="p">,</span>
<span class="n">minSplits</span><span class="p">,</span>
<span class="n">batchSize</span><span class="p">,</span>
<span class="p">)</span>
<span class="k">return</span> <span class="n">RDD</span><span class="p">(</span><span class="n">jrdd</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span></div>
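# A hedged usage sketch (assumes an existing SparkContext `sc` and a
# SequenceFile of (Text, IntWritable) pairs at the hypothetical path below):
# keys and values come back unpickled, falling back to toString as described
# in the docstring above.
pairs = sc.sequenceFile(
    "hdfs:///data/pairs.seq",                        # hypothetical path
    keyClass="org.apache.hadoop.io.Text",
    valueClass="org.apache.hadoop.io.IntWritable",
)
pairs.take(3)   # e.g. [('a', 1), ('b', 2), ('c', 3)] for matching data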
<div class="viewcode-block" id="SparkContext.newAPIHadoopFile"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.newAPIHadoopFile.html#pyspark.SparkContext.newAPIHadoopFile">[docs]</a> <span class="k">def</span> <span class="nf">newAPIHadoopFile</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span>
<span class="n">path</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
<span class="n">inputFormatClass</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
<span class="n">keyClass</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
<span class="n">valueClass</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
<span class="n">keyConverter</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">valueConverter</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">conf</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="nb">str</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">batchSize</span><span class="p">:</span> <span class="nb">int</span> <span class="o">=</span> <span class="mi">0</span><span class="p">,</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="n">RDD</span><span class="p">[</span><span class="n">Tuple</span><span class="p">[</span><span class="n">T</span><span class="p">,</span> <span class="n">U</span><span class="p">]]:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Read a &#39;new API&#39; Hadoop InputFormat with arbitrary key and value class from HDFS,</span>
<span class="sd"> a local file system (available on all nodes), or any Hadoop-supported file system URI.</span>
<span class="sd"> The mechanism is the same as for :py:meth:`SparkContext.sequenceFile`.</span>
<span class="sd"> A Hadoop configuration can be passed in as a Python dict. This will be converted into a</span>
<span class="sd"> Configuration in Java</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> path : str</span>
<span class="sd"> path to Hadoop file</span>
<span class="sd"> inputFormatClass : str</span>
<span class="sd"> fully qualified classname of Hadoop InputFormat</span>
<span class="sd"> (e.g. &quot;org.apache.hadoop.mapreduce.lib.input.TextInputFormat&quot;)</span>
<span class="sd"> keyClass : str</span>
<span class="sd"> fully qualified classname of key Writable class</span>
<span class="sd"> (e.g. &quot;org.apache.hadoop.io.Text&quot;)</span>
<span class="sd"> valueClass : str</span>
<span class="sd"> fully qualified classname of value Writable class</span>
<span class="sd"> (e.g. &quot;org.apache.hadoop.io.LongWritable&quot;)</span>
<span class="sd"> keyConverter : str, optional</span>
<span class="sd"> fully qualified name of a function returning key WritableConverter</span>
<span class="sd"> None by default</span>
<span class="sd"> valueConverter : str, optional</span>
<span class="sd"> fully qualified name of a function returning value WritableConverter</span>
<span class="sd"> None by default</span>
<span class="sd"> conf : dict, optional</span>
<span class="sd"> Hadoop configuration, passed in as a dict</span>
<span class="sd"> None by default</span>
<span class="sd"> batchSize : int, optional</span>
<span class="sd"> The number of Python objects represented as a single</span>
<span class="sd"> Java object. (default 0, choose batchSize automatically)</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">jconf</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_dictToJavaMap</span><span class="p">(</span><span class="n">conf</span><span class="p">)</span>
<span class="k">assert</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="n">jrdd</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">PythonRDD</span><span class="o">.</span><span class="n">newAPIHadoopFile</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="p">,</span>
<span class="n">path</span><span class="p">,</span>
<span class="n">inputFormatClass</span><span class="p">,</span>
<span class="n">keyClass</span><span class="p">,</span>
<span class="n">valueClass</span><span class="p">,</span>
<span class="n">keyConverter</span><span class="p">,</span>
<span class="n">valueConverter</span><span class="p">,</span>
<span class="n">jconf</span><span class="p">,</span>
<span class="n">batchSize</span><span class="p">,</span>
<span class="p">)</span>
<span class="k">return</span> <span class="n">RDD</span><span class="p">(</span><span class="n">jrdd</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span></div>
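# A hedged usage sketch (assumes an existing SparkContext `sc` and a text
# file at the hypothetical path below): the new-API TextInputFormat yields
# (byte offset, line) records as (LongWritable, Text) pairs.
pairs = sc.newAPIHadoopFile(
    "hdfs:///data/input.txt",                                    # hypothetical path
    "org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
    "org.apache.hadoop.io.LongWritable",
    "org.apache.hadoop.io.Text",
)
lines = pairs.values()   # drop the offsets and keep the line text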
<div class="viewcode-block" id="SparkContext.newAPIHadoopRDD"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.newAPIHadoopRDD.html#pyspark.SparkContext.newAPIHadoopRDD">[docs]</a> <span class="k">def</span> <span class="nf">newAPIHadoopRDD</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span>
<span class="n">inputFormatClass</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
<span class="n">keyClass</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
<span class="n">valueClass</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
<span class="n">keyConverter</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">valueConverter</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">conf</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="nb">str</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">batchSize</span><span class="p">:</span> <span class="nb">int</span> <span class="o">=</span> <span class="mi">0</span><span class="p">,</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="n">RDD</span><span class="p">[</span><span class="n">Tuple</span><span class="p">[</span><span class="n">T</span><span class="p">,</span> <span class="n">U</span><span class="p">]]:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Read a &#39;new API&#39; Hadoop InputFormat with arbitrary key and value class, from an arbitrary</span>
<span class="sd"> Hadoop configuration, which is passed in as a Python dict.</span>
<span class="sd"> This will be converted into a Configuration in Java.</span>
<span class="sd"> The mechanism is the same as for :py:meth:`SparkContext.sequenceFile`.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> inputFormatClass : str</span>
<span class="sd"> fully qualified classname of Hadoop InputFormat</span>
<span class="sd"> (e.g. &quot;org.apache.hadoop.mapreduce.lib.input.TextInputFormat&quot;)</span>
<span class="sd"> keyClass : str</span>
<span class="sd"> fully qualified classname of key Writable class (e.g. &quot;org.apache.hadoop.io.Text&quot;)</span>
<span class="sd"> valueClass : str</span>
<span class="sd"> fully qualified classname of value Writable class</span>
<span class="sd"> (e.g. &quot;org.apache.hadoop.io.LongWritable&quot;)</span>
<span class="sd"> keyConverter : str, optional</span>
<span class="sd"> fully qualified name of a function returning key WritableConverter</span>
<span class="sd"> (None by default)</span>
<span class="sd"> valueConverter : str, optional</span>
<span class="sd"> fully qualified name of a function returning value WritableConverter</span>
<span class="sd"> (None by default)</span>
<span class="sd"> conf : dict, optional</span>
<span class="sd"> Hadoop configuration, passed in as a dict (None by default)</span>
<span class="sd"> batchSize : int, optional</span>
<span class="sd"> The number of Python objects represented as a single</span>
<span class="sd"> Java object. (default 0, choose batchSize automatically)</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">jconf</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_dictToJavaMap</span><span class="p">(</span><span class="n">conf</span><span class="p">)</span>
<span class="k">assert</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="n">jrdd</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">PythonRDD</span><span class="o">.</span><span class="n">newAPIHadoopRDD</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="p">,</span>
<span class="n">inputFormatClass</span><span class="p">,</span>
<span class="n">keyClass</span><span class="p">,</span>
<span class="n">valueClass</span><span class="p">,</span>
<span class="n">keyConverter</span><span class="p">,</span>
<span class="n">valueConverter</span><span class="p">,</span>
<span class="n">jconf</span><span class="p">,</span>
<span class="n">batchSize</span><span class="p">,</span>
<span class="p">)</span>
<span class="k">return</span> <span class="n">RDD</span><span class="p">(</span><span class="n">jrdd</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span></div>
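# A hedged usage sketch (assumes an existing SparkContext `sc`): unlike
# newAPIHadoopFile there is no path argument, so the input location goes
# through the Hadoop configuration dict (the standard FileInputFormat key is
# "mapreduce.input.fileinputformat.inputdir").
pairs = sc.newAPIHadoopRDD(
    "org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
    "org.apache.hadoop.io.LongWritable",
    "org.apache.hadoop.io.Text",
    conf={"mapreduce.input.fileinputformat.inputdir": "hdfs:///data/input.txt"},
)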
<div class="viewcode-block" id="SparkContext.hadoopFile"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.hadoopFile.html#pyspark.SparkContext.hadoopFile">[docs]</a> <span class="k">def</span> <span class="nf">hadoopFile</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span>
<span class="n">path</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
<span class="n">inputFormatClass</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
<span class="n">keyClass</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
<span class="n">valueClass</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
<span class="n">keyConverter</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">valueConverter</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">conf</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="nb">str</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">batchSize</span><span class="p">:</span> <span class="nb">int</span> <span class="o">=</span> <span class="mi">0</span><span class="p">,</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="n">RDD</span><span class="p">[</span><span class="n">Tuple</span><span class="p">[</span><span class="n">T</span><span class="p">,</span> <span class="n">U</span><span class="p">]]:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Read an &#39;old&#39; Hadoop InputFormat with arbitrary key and value class from HDFS,</span>
<span class="sd"> a local file system (available on all nodes), or any Hadoop-supported file system URI.</span>
<span class="sd"> The mechanism is the same as for :py:meth:`SparkContext.sequenceFile`.</span>
<span class="sd"> A Hadoop configuration can be passed in as a Python dict. This will be converted into a</span>
<span class="sd"> Configuration in Java.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> path : str</span>
<span class="sd"> path to Hadoop file</span>
<span class="sd"> inputFormatClass : str</span>
<span class="sd"> fully qualified classname of Hadoop InputFormat</span>
<span class="sd"> (e.g. &quot;org.apache.hadoop.mapreduce.lib.input.TextInputFormat&quot;)</span>
<span class="sd"> keyClass : str</span>
<span class="sd"> fully qualified classname of key Writable class (e.g. &quot;org.apache.hadoop.io.Text&quot;)</span>
<span class="sd"> valueClass : str</span>
<span class="sd"> fully qualified classname of value Writable class</span>
<span class="sd"> (e.g. &quot;org.apache.hadoop.io.LongWritable&quot;)</span>
<span class="sd"> keyConverter : str, optional</span>
<span class="sd"> fully qualified name of a function returning key WritableConverter</span>
<span class="sd"> (None by default)</span>
<span class="sd"> valueConverter : str, optional</span>
<span class="sd"> fully qualified name of a function returning value WritableConverter</span>
<span class="sd"> (None by default)</span>
<span class="sd"> conf : dict, optional</span>
<span class="sd"> Hadoop configuration, passed in as a dict (None by default)</span>
<span class="sd"> batchSize : int, optional</span>
<span class="sd"> The number of Python objects represented as a single</span>
<span class="sd"> Java object. (default 0, choose batchSize automatically)</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">jconf</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_dictToJavaMap</span><span class="p">(</span><span class="n">conf</span><span class="p">)</span>
<span class="k">assert</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="n">jrdd</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">PythonRDD</span><span class="o">.</span><span class="n">hadoopFile</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="p">,</span>
<span class="n">path</span><span class="p">,</span>
<span class="n">inputFormatClass</span><span class="p">,</span>
<span class="n">keyClass</span><span class="p">,</span>
<span class="n">valueClass</span><span class="p">,</span>
<span class="n">keyConverter</span><span class="p">,</span>
<span class="n">valueConverter</span><span class="p">,</span>
<span class="n">jconf</span><span class="p">,</span>
<span class="n">batchSize</span><span class="p">,</span>
<span class="p">)</span>
<span class="k">return</span> <span class="n">RDD</span><span class="p">(</span><span class="n">jrdd</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span></div>
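# A hedged usage sketch (assumes an existing SparkContext `sc`): same shape
# as newAPIHadoopFile above, but with the old-API (org.apache.hadoop.mapred)
# TextInputFormat.
pairs = sc.hadoopFile(
    "hdfs:///data/input.txt",                    # hypothetical path
    "org.apache.hadoop.mapred.TextInputFormat",
    "org.apache.hadoop.io.LongWritable",
    "org.apache.hadoop.io.Text",
)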
<div class="viewcode-block" id="SparkContext.hadoopRDD"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.hadoopRDD.html#pyspark.SparkContext.hadoopRDD">[docs]</a> <span class="k">def</span> <span class="nf">hadoopRDD</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span>
<span class="n">inputFormatClass</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
<span class="n">keyClass</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
<span class="n">valueClass</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
<span class="n">keyConverter</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">valueConverter</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">conf</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="nb">str</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">batchSize</span><span class="p">:</span> <span class="nb">int</span> <span class="o">=</span> <span class="mi">0</span><span class="p">,</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="n">RDD</span><span class="p">[</span><span class="n">Tuple</span><span class="p">[</span><span class="n">T</span><span class="p">,</span> <span class="n">U</span><span class="p">]]:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Read an &#39;old&#39; Hadoop InputFormat with arbitrary key and value class, from an arbitrary</span>
<span class="sd"> Hadoop configuration, which is passed in as a Python dict.</span>
<span class="sd"> This will be converted into a Configuration in Java.</span>
<span class="sd"> The mechanism is the same as for :py:meth:`SparkContext.sequenceFile`.</span>
<span class="sd"> Parameters</span>
<span class="sd"> ----------</span>
<span class="sd"> inputFormatClass : str</span>
<span class="sd"> fully qualified classname of Hadoop InputFormat</span>
<span class="sd"> (e.g. &quot;org.apache.hadoop.mapreduce.lib.input.TextInputFormat&quot;)</span>
<span class="sd"> keyClass : str</span>
<span class="sd"> fully qualified classname of key Writable class (e.g. &quot;org.apache.hadoop.io.Text&quot;)</span>
<span class="sd"> valueClass : str</span>
<span class="sd"> fully qualified classname of value Writable class</span>
<span class="sd"> (e.g. &quot;org.apache.hadoop.io.LongWritable&quot;)</span>
<span class="sd"> keyConverter : str, optional</span>
<span class="sd"> fully qualified name of a function returning key WritableConverter</span>
<span class="sd"> (None by default)</span>
<span class="sd"> valueConverter : str, optional</span>
<span class="sd"> fully qualified name of a function returning value WritableConverter</span>
<span class="sd"> (None by default)</span>
<span class="sd"> conf : dict, optional</span>
<span class="sd"> Hadoop configuration, passed in as a dict (None by default)</span>
<span class="sd"> batchSize : int, optional</span>
<span class="sd"> The number of Python objects represented as a single</span>
<span class="sd"> Java object. (default 0, choose batchSize automatically)</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">jconf</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_dictToJavaMap</span><span class="p">(</span><span class="n">conf</span><span class="p">)</span>
<span class="k">assert</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="n">jrdd</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">PythonRDD</span><span class="o">.</span><span class="n">hadoopRDD</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="p">,</span>
<span class="n">inputFormatClass</span><span class="p">,</span>
<span class="n">keyClass</span><span class="p">,</span>
<span class="n">valueClass</span><span class="p">,</span>
<span class="n">keyConverter</span><span class="p">,</span>
<span class="n">valueConverter</span><span class="p">,</span>
<span class="n">jconf</span><span class="p">,</span>
<span class="n">batchSize</span><span class="p">,</span>
<span class="p">)</span>
<span class="k">return</span> <span class="n">RDD</span><span class="p">(</span><span class="n">jrdd</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span></div>
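# A hedged usage sketch (assumes an existing SparkContext `sc`): the old-API
# counterpart of newAPIHadoopRDD; the input directory again comes from the
# conf dict rather than a path argument.
pairs = sc.hadoopRDD(
    "org.apache.hadoop.mapred.TextInputFormat",
    "org.apache.hadoop.io.LongWritable",
    "org.apache.hadoop.io.Text",
    conf={"mapreduce.input.fileinputformat.inputdir": "hdfs:///data/input.txt"},
)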
<span class="k">def</span> <span class="nf">_checkpointFile</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">name</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span> <span class="n">input_deserializer</span><span class="p">:</span> <span class="n">PairDeserializer</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">RDD</span><span class="p">:</span>
<span class="n">jrdd</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">checkpointFile</span><span class="p">(</span><span class="n">name</span><span class="p">)</span>
<span class="k">return</span> <span class="n">RDD</span><span class="p">(</span><span class="n">jrdd</span><span class="p">,</span> <span class="bp">self</span><span class="p">,</span> <span class="n">input_deserializer</span><span class="p">)</span>
<div class="viewcode-block" id="SparkContext.union"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.union.html#pyspark.SparkContext.union">[docs]</a> <span class="k">def</span> <span class="nf">union</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">rdds</span><span class="p">:</span> <span class="n">List</span><span class="p">[</span><span class="n">RDD</span><span class="p">[</span><span class="n">T</span><span class="p">]])</span> <span class="o">-&gt;</span> <span class="n">RDD</span><span class="p">[</span><span class="n">T</span><span class="p">]:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Build the union of a list of RDDs.</span>
<span class="sd"> This supports unions() of RDDs with different serialized formats,</span>
<span class="sd"> although this forces them to be reserialized using the default</span>
<span class="sd"> serializer:</span>
<span class="sd"> Examples</span>
<span class="sd"> --------</span>
<span class="sd"> &gt;&gt;&gt; path = os.path.join(tempdir, &quot;union-text.txt&quot;)</span>
<span class="sd"> &gt;&gt;&gt; with open(path, &quot;w&quot;) as testFile:</span>
<span class="sd"> ... _ = testFile.write(&quot;Hello&quot;)</span>
<span class="sd"> &gt;&gt;&gt; textFile = sc.textFile(path)</span>
<span class="sd"> &gt;&gt;&gt; textFile.collect()</span>
<span class="sd"> [&#39;Hello&#39;]</span>
<span class="sd"> &gt;&gt;&gt; parallelized = sc.parallelize([&quot;World!&quot;])</span>
<span class="sd"> &gt;&gt;&gt; sorted(sc.union([textFile, parallelized]).collect())</span>
<span class="sd"> [&#39;Hello&#39;, &#39;World!&#39;]</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">first_jrdd_deserializer</span> <span class="o">=</span> <span class="n">rdds</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">_jrdd_deserializer</span>
<span class="k">if</span> <span class="nb">any</span><span class="p">(</span><span class="n">x</span><span class="o">.</span><span class="n">_jrdd_deserializer</span> <span class="o">!=</span> <span class="n">first_jrdd_deserializer</span> <span class="k">for</span> <span class="n">x</span> <span class="ow">in</span> <span class="n">rdds</span><span class="p">):</span>
<span class="n">rdds</span> <span class="o">=</span> <span class="p">[</span><span class="n">x</span><span class="o">.</span><span class="n">_reserialize</span><span class="p">()</span> <span class="k">for</span> <span class="n">x</span> <span class="ow">in</span> <span class="n">rdds</span><span class="p">]</span>
<span class="n">gw</span> <span class="o">=</span> <span class="n">SparkContext</span><span class="o">.</span><span class="n">_gateway</span>
<span class="k">assert</span> <span class="n">gw</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="n">jvm</span> <span class="o">=</span> <span class="n">SparkContext</span><span class="o">.</span><span class="n">_jvm</span>
<span class="k">assert</span> <span class="n">jvm</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="n">jrdd_cls</span> <span class="o">=</span> <span class="n">jvm</span><span class="o">.</span><span class="n">org</span><span class="o">.</span><span class="n">apache</span><span class="o">.</span><span class="n">spark</span><span class="o">.</span><span class="n">api</span><span class="o">.</span><span class="n">java</span><span class="o">.</span><span class="n">JavaRDD</span>
<span class="n">jpair_rdd_cls</span> <span class="o">=</span> <span class="n">jvm</span><span class="o">.</span><span class="n">org</span><span class="o">.</span><span class="n">apache</span><span class="o">.</span><span class="n">spark</span><span class="o">.</span><span class="n">api</span><span class="o">.</span><span class="n">java</span><span class="o">.</span><span class="n">JavaPairRDD</span>
<span class="n">jdouble_rdd_cls</span> <span class="o">=</span> <span class="n">jvm</span><span class="o">.</span><span class="n">org</span><span class="o">.</span><span class="n">apache</span><span class="o">.</span><span class="n">spark</span><span class="o">.</span><span class="n">api</span><span class="o">.</span><span class="n">java</span><span class="o">.</span><span class="n">JavaDoubleRDD</span>
<span class="k">if</span> <span class="n">is_instance_of</span><span class="p">(</span><span class="n">gw</span><span class="p">,</span> <span class="n">rdds</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">_jrdd</span><span class="p">,</span> <span class="n">jrdd_cls</span><span class="p">):</span>
<span class="bp">cls</span> <span class="o">=</span> <span class="n">jrdd_cls</span>
<span class="k">elif</span> <span class="n">is_instance_of</span><span class="p">(</span><span class="n">gw</span><span class="p">,</span> <span class="n">rdds</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">_jrdd</span><span class="p">,</span> <span class="n">jpair_rdd_cls</span><span class="p">):</span>
<span class="bp">cls</span> <span class="o">=</span> <span class="n">jpair_rdd_cls</span>
<span class="k">elif</span> <span class="n">is_instance_of</span><span class="p">(</span><span class="n">gw</span><span class="p">,</span> <span class="n">rdds</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">_jrdd</span><span class="p">,</span> <span class="n">jdouble_rdd_cls</span><span class="p">):</span>
<span class="bp">cls</span> <span class="o">=</span> <span class="n">jdouble_rdd_cls</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">cls_name</span> <span class="o">=</span> <span class="n">rdds</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">_jrdd</span><span class="o">.</span><span class="n">getClass</span><span class="p">()</span><span class="o">.</span><span class="n">getCanonicalName</span><span class="p">()</span>
<span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="s2">&quot;Unsupported Java RDD class </span><span class="si">%s</span><span class="s2">&quot;</span> <span class="o">%</span> <span class="n">cls_name</span><span class="p">)</span>
<span class="n">jrdds</span> <span class="o">=</span> <span class="n">gw</span><span class="o">.</span><span class="n">new_array</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">rdds</span><span class="p">))</span>
<span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="nb">len</span><span class="p">(</span><span class="n">rdds</span><span class="p">)):</span>
<span class="n">jrdds</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">=</span> <span class="n">rdds</span><span class="p">[</span><span class="n">i</span><span class="p">]</span><span class="o">.</span><span class="n">_jrdd</span>
<span class="k">return</span> <span class="n">RDD</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">union</span><span class="p">(</span><span class="n">jrdds</span><span class="p">),</span> <span class="bp">self</span><span class="p">,</span> <span class="n">rdds</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span><span class="o">.</span><span class="n">_jrdd_deserializer</span><span class="p">)</span></div>
<div class="viewcode-block" id="SparkContext.broadcast"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.broadcast.html#pyspark.SparkContext.broadcast">[docs]</a> <span class="k">def</span> <span class="nf">broadcast</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">value</span><span class="p">:</span> <span class="n">T</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="s2">&quot;Broadcast[T]&quot;</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Broadcast a read-only variable to the cluster, returning a :class:`Broadcast`</span>
<span class="sd"> object for reading it in distributed functions. The variable will</span>
<span class="sd"> be sent to each cluster only once.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">return</span> <span class="n">Broadcast</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">value</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">_pickled_broadcast_vars</span><span class="p">)</span></div>
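# A hedged usage sketch (assumes an existing SparkContext `sc`): broadcast a
# small lookup table once, then read it inside tasks through Broadcast.value.
lookup = sc.broadcast({"a": 1, "b": 2})
sc.parallelize(["a", "b", "a"]).map(lambda k: lookup.value[k]).collect()
# [1, 2, 1]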
<div class="viewcode-block" id="SparkContext.accumulator"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.accumulator.html#pyspark.SparkContext.accumulator">[docs]</a> <span class="k">def</span> <span class="nf">accumulator</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span> <span class="n">value</span><span class="p">:</span> <span class="n">T</span><span class="p">,</span> <span class="n">accum_param</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="s2">&quot;AccumulatorParam[T]&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="s2">&quot;Accumulator[T]&quot;</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Create an :class:`Accumulator` with the given initial value, using a given</span>
<span class="sd"> :class:`AccumulatorParam` helper object to define how to add values of the</span>
<span class="sd"> data type if provided. Default AccumulatorParams are used for integers</span>
<span class="sd"> and floating-point numbers if you do not provide one. For other types,</span>
<span class="sd"> a custom AccumulatorParam can be used.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">if</span> <span class="n">accum_param</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">value</span><span class="p">,</span> <span class="nb">int</span><span class="p">):</span>
<span class="n">accum_param</span> <span class="o">=</span> <span class="n">cast</span><span class="p">(</span><span class="s2">&quot;AccumulatorParam[T]&quot;</span><span class="p">,</span> <span class="n">accumulators</span><span class="o">.</span><span class="n">INT_ACCUMULATOR_PARAM</span><span class="p">)</span>
<span class="k">elif</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">value</span><span class="p">,</span> <span class="nb">float</span><span class="p">):</span>
<span class="n">accum_param</span> <span class="o">=</span> <span class="n">cast</span><span class="p">(</span><span class="s2">&quot;AccumulatorParam[T]&quot;</span><span class="p">,</span> <span class="n">accumulators</span><span class="o">.</span><span class="n">FLOAT_ACCUMULATOR_PARAM</span><span class="p">)</span>
<span class="k">elif</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">value</span><span class="p">,</span> <span class="nb">complex</span><span class="p">):</span>
<span class="n">accum_param</span> <span class="o">=</span> <span class="n">cast</span><span class="p">(</span><span class="s2">&quot;AccumulatorParam[T]&quot;</span><span class="p">,</span> <span class="n">accumulators</span><span class="o">.</span><span class="n">COMPLEX_ACCUMULATOR_PARAM</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="s2">&quot;No default accumulator param for type </span><span class="si">%s</span><span class="s2">&quot;</span> <span class="o">%</span> <span class="nb">type</span><span class="p">(</span><span class="n">value</span><span class="p">))</span>
<span class="n">SparkContext</span><span class="o">.</span><span class="n">_next_accum_id</span> <span class="o">+=</span> <span class="mi">1</span>
<span class="k">return</span> <span class="n">Accumulator</span><span class="p">(</span><span class="n">SparkContext</span><span class="o">.</span><span class="n">_next_accum_id</span> <span class="o">-</span> <span class="mi">1</span><span class="p">,</span> <span class="n">value</span><span class="p">,</span> <span class="n">accum_param</span><span class="p">)</span></div>
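# A hedged usage sketch (assumes an existing SparkContext `sc`): an int
# initial value picks INT_ACCUMULATOR_PARAM by default; tasks only add to
# the accumulator, and only the driver reads its value.
counter = sc.accumulator(0)
sc.parallelize([1, 2, 3, 4]).foreach(lambda x: counter.add(x))
counter.value   # 10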
<div class="viewcode-block" id="SparkContext.addFile"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.addFile.html#pyspark.SparkContext.addFile">[docs]</a> <span class="k">def</span> <span class="nf">addFile</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">path</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span> <span class="n">recursive</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="kc">False</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Add a file to be downloaded with this Spark job on every node.</span>
<span class="sd"> The `path` passed can be either a local file, a file in HDFS</span>
<span class="sd"> (or other Hadoop-supported filesystems), or an HTTP, HTTPS or</span>
<span class="sd"> FTP URI.</span>
<span class="sd"> To access the file in Spark jobs, use :meth:`SparkFiles.get` with the</span>
<span class="sd"> filename to find its download location.</span>
<span class="sd"> A directory can be given if the recursive option is set to True.</span>
<span class="sd"> Currently directories are only supported for Hadoop-supported filesystems.</span>
<span class="sd"> Notes</span>
<span class="sd"> -----</span>
<span class="sd"> A path can be added only once. Subsequent additions of the same path are ignored.</span>
<span class="sd"> Examples</span>
<span class="sd"> --------</span>
<span class="sd"> &gt;&gt;&gt; from pyspark import SparkFiles</span>
<span class="sd"> &gt;&gt;&gt; path = os.path.join(tempdir, &quot;test.txt&quot;)</span>
<span class="sd"> &gt;&gt;&gt; with open(path, &quot;w&quot;) as testFile:</span>
<span class="sd"> ... _ = testFile.write(&quot;100&quot;)</span>
<span class="sd"> &gt;&gt;&gt; sc.addFile(path)</span>
<span class="sd"> &gt;&gt;&gt; def func(iterator):</span>
<span class="sd"> ... with open(SparkFiles.get(&quot;test.txt&quot;)) as testFile:</span>
<span class="sd"> ... fileVal = int(testFile.readline())</span>
<span class="sd"> ... return [x * fileVal for x in iterator]</span>
<span class="sd"> &gt;&gt;&gt; sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()</span>
<span class="sd"> [100, 200, 300, 400]</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">sc</span><span class="p">()</span><span class="o">.</span><span class="n">addFile</span><span class="p">(</span><span class="n">path</span><span class="p">,</span> <span class="n">recursive</span><span class="p">)</span></div>
<div class="viewcode-block" id="SparkContext.addPyFile"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.addPyFile.html#pyspark.SparkContext.addPyFile">[docs]</a> <span class="k">def</span> <span class="nf">addPyFile</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">path</span><span class="p">:</span> <span class="nb">str</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Add a .py or .zip dependency for all tasks to be executed on this</span>
<span class="sd"> SparkContext in the future. The `path` passed can be either a local</span>
<span class="sd"> file, a file in HDFS (or other Hadoop-supported filesystems), or an</span>
<span class="sd"> HTTP, HTTPS or FTP URI.</span>
<span class="sd"> Notes</span>
<span class="sd"> -----</span>
<span class="sd"> A path can be added only once. Subsequent additions of the same path are ignored.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="bp">self</span><span class="o">.</span><span class="n">addFile</span><span class="p">(</span><span class="n">path</span><span class="p">)</span>
<span class="p">(</span><span class="n">dirname</span><span class="p">,</span> <span class="n">filename</span><span class="p">)</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="n">path</span><span class="p">)</span> <span class="c1"># dirname may be directory or HDFS/S3 prefix</span>
<span class="k">if</span> <span class="n">filename</span><span class="p">[</span><span class="o">-</span><span class="mi">4</span><span class="p">:]</span><span class="o">.</span><span class="n">lower</span><span class="p">()</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">PACKAGE_EXTENSIONS</span><span class="p">:</span>
<span class="k">assert</span> <span class="bp">self</span><span class="o">.</span><span class="n">_python_includes</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_python_includes</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">filename</span><span class="p">)</span>
<span class="c1"># for tests in local mode</span>
<span class="n">sys</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">insert</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">SparkFiles</span><span class="o">.</span><span class="n">getRootDirectory</span><span class="p">(),</span> <span class="n">filename</span><span class="p">))</span>
<span class="n">importlib</span><span class="o">.</span><span class="n">invalidate_caches</span><span class="p">()</span></div>
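# A hedged usage sketch (assumes an existing SparkContext `sc` and a local
# helpers.py that defines a function `double` -- both hypothetical): the file
# is shipped to executors and becomes importable inside tasks.
sc.addPyFile("helpers.py")
def use_helper(x):
    from helpers import double   # resolvable on executors after addPyFile
    return double(x)
sc.parallelize([1, 2, 3]).map(use_helper).collect()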
<div class="viewcode-block" id="SparkContext.addArchive"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.addArchive.html#pyspark.SparkContext.addArchive">[docs]</a> <span class="k">def</span> <span class="nf">addArchive</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">path</span><span class="p">:</span> <span class="nb">str</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Add an archive to be downloaded with this Spark job on every node.</span>
<span class="sd"> The `path` passed can be either a local file, a file in HDFS</span>
<span class="sd"> (or other Hadoop-supported filesystems), or an HTTP, HTTPS or</span>
<span class="sd"> FTP URI.</span>
<span class="sd"> To access the file in Spark jobs, use :meth:`SparkFiles.get` with the</span>
<span class="sd"> filename to find its download/unpacked location. The given path should</span>
<span class="sd"> be one of .zip, .tar, .tar.gz, .tgz, or .jar.</span>
<span class="sd"> .. versionadded:: 3.3.0</span>
<span class="sd"> Notes</span>
<span class="sd"> -----</span>
<span class="sd"> A path can be added only once. Subsequent additions of the same path are ignored.</span>
<span class="sd"> This API is experimental.</span>
<span class="sd"> Examples</span>
<span class="sd"> --------</span>
<span class="sd"> Creates a zipped file that contains a text file with &#39;100&#39; written in it.</span>
<span class="sd"> &gt;&gt;&gt; import zipfile</span>
<span class="sd"> &gt;&gt;&gt; from pyspark import SparkFiles</span>
<span class="sd"> &gt;&gt;&gt; path = os.path.join(tempdir, &quot;test.txt&quot;)</span>
<span class="sd"> &gt;&gt;&gt; zip_path = os.path.join(tempdir, &quot;test.zip&quot;)</span>
<span class="sd"> &gt;&gt;&gt; with zipfile.ZipFile(zip_path, &quot;w&quot;, zipfile.ZIP_DEFLATED) as zipped:</span>
<span class="sd"> ... with open(path, &quot;w&quot;) as f:</span>
<span class="sd"> ... _ = f.write(&quot;100&quot;)</span>
<span class="sd"> ... zipped.write(path, os.path.basename(path))</span>
<span class="sd"> &gt;&gt;&gt; sc.addArchive(zip_path)</span>
<span class="sd"> Reads the &#39;100&#39; in the zipped file as an integer, and processes</span>
<span class="sd"> it with the data in the RDD.</span>
<span class="sd"> &gt;&gt;&gt; def func(iterator):</span>
<span class="sd"> ... with open(&quot;%s/test.txt&quot; % SparkFiles.get(&quot;test.zip&quot;)) as f:</span>
<span class="sd"> ... v = int(f.readline())</span>
<span class="sd"> ... return [x * int(v) for x in iterator]</span>
<span class="sd"> &gt;&gt;&gt; sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()</span>
<span class="sd"> [100, 200, 300, 400]</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">sc</span><span class="p">()</span><span class="o">.</span><span class="n">addArchive</span><span class="p">(</span><span class="n">path</span><span class="p">)</span></div>
<div class="viewcode-block" id="SparkContext.setCheckpointDir"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.setCheckpointDir.html#pyspark.SparkContext.setCheckpointDir">[docs]</a> <span class="k">def</span> <span class="nf">setCheckpointDir</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dirName</span><span class="p">:</span> <span class="nb">str</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Set the directory under which RDDs are going to be checkpointed. The</span>
<span class="sd"> directory must be an HDFS path if running on a cluster.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">sc</span><span class="p">()</span><span class="o">.</span><span class="n">setCheckpointDir</span><span class="p">(</span><span class="n">dirName</span><span class="p">)</span></div>
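# A hedged usage sketch (assumes an existing SparkContext `sc`; the directory
# below is hypothetical and should be an HDFS path on a cluster): mark an RDD
# for checkpointing and force it to materialize with an action.
sc.setCheckpointDir("/tmp/spark-checkpoints")
rdd = sc.parallelize(range(10)).map(lambda x: x * x)
rdd.checkpoint()
rdd.count()            # triggers the actual checkpoint
rdd.isCheckpointed()   # True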
<div class="viewcode-block" id="SparkContext.getCheckpointDir"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.getCheckpointDir.html#pyspark.SparkContext.getCheckpointDir">[docs]</a> <span class="nd">@since</span><span class="p">(</span><span class="mf">3.1</span><span class="p">)</span>
<span class="k">def</span> <span class="nf">getCheckpointDir</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Return the directory where RDDs are checkpointed. Returns None if no</span>
<span class="sd"> checkpoint directory has been set.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">sc</span><span class="p">()</span><span class="o">.</span><span class="n">getCheckpointDir</span><span class="p">()</span><span class="o">.</span><span class="n">isEmpty</span><span class="p">():</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">sc</span><span class="p">()</span><span class="o">.</span><span class="n">getCheckpointDir</span><span class="p">()</span><span class="o">.</span><span class="n">get</span><span class="p">()</span>
<span class="k">return</span> <span class="kc">None</span></div>
<span class="k">def</span> <span class="nf">_getJavaStorageLevel</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">storageLevel</span><span class="p">:</span> <span class="n">StorageLevel</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">JavaObject</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Returns a Java StorageLevel based on a pyspark.StorageLevel.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">storageLevel</span><span class="p">,</span> <span class="n">StorageLevel</span><span class="p">):</span>
<span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="s2">&quot;storageLevel must be of type pyspark.StorageLevel&quot;</span><span class="p">)</span>
<span class="k">assert</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="n">newStorageLevel</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">org</span><span class="o">.</span><span class="n">apache</span><span class="o">.</span><span class="n">spark</span><span class="o">.</span><span class="n">storage</span><span class="o">.</span><span class="n">StorageLevel</span>
<span class="k">return</span> <span class="n">newStorageLevel</span><span class="p">(</span>
<span class="n">storageLevel</span><span class="o">.</span><span class="n">useDisk</span><span class="p">,</span>
<span class="n">storageLevel</span><span class="o">.</span><span class="n">useMemory</span><span class="p">,</span>
<span class="n">storageLevel</span><span class="o">.</span><span class="n">useOffHeap</span><span class="p">,</span>
<span class="n">storageLevel</span><span class="o">.</span><span class="n">deserialized</span><span class="p">,</span>
<span class="n">storageLevel</span><span class="o">.</span><span class="n">replication</span><span class="p">,</span>
<span class="p">)</span>
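# For reference, a pyspark.StorageLevel is a plain record of the same five
# flags passed to the Java constructor above; a minimal sketch (the printed
# values are what MEMORY_AND_DISK is expected to carry, stated as an
# assumption rather than verified output):
from pyspark import StorageLevel

level = StorageLevel.MEMORY_AND_DISK
(level.useDisk, level.useMemory, level.useOffHeap,
 level.deserialized, level.replication)
# (True, True, False, False, 1)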
<div class="viewcode-block" id="SparkContext.setJobGroup"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.setJobGroup.html#pyspark.SparkContext.setJobGroup">[docs]</a> <span class="k">def</span> <span class="nf">setJobGroup</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">groupId</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span> <span class="n">description</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span> <span class="n">interruptOnCancel</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="kc">False</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Assigns a group ID to all the jobs started by this thread until the group ID is set to a</span>
<span class="sd"> different value or cleared.</span>
<span class="sd"> Often, a unit of execution in an application consists of multiple Spark actions or jobs.</span>
<span class="sd"> Application programmers can use this method to group all those jobs together and give a</span>
<span class="sd"> group description. Once set, the Spark web UI will associate such jobs with this group.</span>
<span class="sd"> The application can use :meth:`SparkContext.cancelJobGroup` to cancel all</span>
<span class="sd"> running jobs in this group.</span>
<span class="sd"> Notes</span>
<span class="sd"> -----</span>
<span class="sd"> If interruptOnCancel is set to true for the job group, then job cancellation will result</span>
<span class="sd"> in Thread.interrupt() being called on the job&#39;s executor threads. This is useful to help</span>
<span class="sd"> ensure that the tasks are actually stopped in a timely manner, but is off by default due</span>
<span class="sd"> to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.</span>
<span class="sd"> If you run jobs in parallel, use :class:`pyspark.InheritableThread` for thread</span>
<span class="sd"> local inheritance.</span>
<span class="sd"> Examples</span>
<span class="sd"> --------</span>
<span class="sd"> &gt;&gt;&gt; import threading</span>
<span class="sd"> &gt;&gt;&gt; from time import sleep</span>
<span class="sd"> &gt;&gt;&gt; from pyspark import InheritableThread</span>
<span class="sd"> &gt;&gt;&gt; result = &quot;Not Set&quot;</span>
<span class="sd"> &gt;&gt;&gt; lock = threading.Lock()</span>
<span class="sd"> &gt;&gt;&gt; def map_func(x):</span>
<span class="sd"> ... sleep(100)</span>
<span class="sd"> ... raise RuntimeError(&quot;Task should have been cancelled&quot;)</span>
<span class="sd"> &gt;&gt;&gt; def start_job(x):</span>
<span class="sd"> ... global result</span>
<span class="sd"> ... try:</span>
<span class="sd"> ... sc.setJobGroup(&quot;job_to_cancel&quot;, &quot;some description&quot;)</span>
<span class="sd"> ... result = sc.parallelize(range(x)).map(map_func).collect()</span>
<span class="sd"> ... except Exception as e:</span>
<span class="sd"> ... result = &quot;Cancelled&quot;</span>
<span class="sd"> ... lock.release()</span>
<span class="sd"> &gt;&gt;&gt; def stop_job():</span>
<span class="sd"> ... sleep(5)</span>
<span class="sd"> ... sc.cancelJobGroup(&quot;job_to_cancel&quot;)</span>
<span class="sd"> &gt;&gt;&gt; suppress = lock.acquire()</span>
<span class="sd"> &gt;&gt;&gt; suppress = InheritableThread(target=start_job, args=(10,)).start()</span>
<span class="sd"> &gt;&gt;&gt; suppress = InheritableThread(target=stop_job).start()</span>
<span class="sd"> &gt;&gt;&gt; suppress = lock.acquire()</span>
<span class="sd"> &gt;&gt;&gt; print(result)</span>
<span class="sd"> Cancelled</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">setJobGroup</span><span class="p">(</span><span class="n">groupId</span><span class="p">,</span> <span class="n">description</span><span class="p">,</span> <span class="n">interruptOnCancel</span><span class="p">)</span></div>
<div class="viewcode-block" id="SparkContext.setLocalProperty"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.setLocalProperty.html#pyspark.SparkContext.setLocalProperty">[docs]</a> <span class="k">def</span> <span class="nf">setLocalProperty</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">key</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span> <span class="n">value</span><span class="p">:</span> <span class="nb">str</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Set a local property that affects jobs submitted from this thread, such as the</span>
<span class="sd"> Spark fair scheduler pool.</span>
<span class="sd"> Notes</span>
<span class="sd"> -----</span>
<span class="sd"> If you run jobs in parallel, use :class:`pyspark.InheritableThread` for thread</span>
<span class="sd"> local inheritance.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">setLocalProperty</span><span class="p">(</span><span class="n">key</span><span class="p">,</span> <span class="n">value</span><span class="p">)</span></div>
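# A hedged usage sketch (assumes an existing SparkContext `sc` running with
# the fair scheduler and a pool named "low_priority" -- both assumptions):
sc.setLocalProperty("spark.scheduler.pool", "low_priority")
sc.parallelize(range(100)).count()   # jobs from this thread go to that pool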
<div class="viewcode-block" id="SparkContext.getLocalProperty"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.getLocalProperty.html#pyspark.SparkContext.getLocalProperty">[docs]</a> <span class="k">def</span> <span class="nf">getLocalProperty</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">key</span><span class="p">:</span> <span class="nb">str</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Get a local property set in this thread, or null if it is missing. See</span>
<span class="sd"> :meth:`setLocalProperty`.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">getLocalProperty</span><span class="p">(</span><span class="n">key</span><span class="p">)</span></div>
<div class="viewcode-block" id="SparkContext.setJobDescription"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.setJobDescription.html#pyspark.SparkContext.setJobDescription">[docs]</a> <span class="k">def</span> <span class="nf">setJobDescription</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">value</span><span class="p">:</span> <span class="nb">str</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Set a human readable description of the current job.</span>
<span class="sd"> Notes</span>
<span class="sd"> -----</span>
<span class="sd"> If you run jobs in parallel, use :class:`pyspark.InheritableThread` for thread</span>
<span class="sd"> local inheritance.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">setJobDescription</span><span class="p">(</span><span class="n">value</span><span class="p">)</span></div>
<div class="viewcode-block" id="SparkContext.sparkUser"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.sparkUser.html#pyspark.SparkContext.sparkUser">[docs]</a> <span class="k">def</span> <span class="nf">sparkUser</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="nb">str</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Get SPARK_USER for user who is running SparkContext.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">sc</span><span class="p">()</span><span class="o">.</span><span class="n">sparkUser</span><span class="p">()</span></div>
<div class="viewcode-block" id="SparkContext.cancelJobGroup"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.cancelJobGroup.html#pyspark.SparkContext.cancelJobGroup">[docs]</a> <span class="k">def</span> <span class="nf">cancelJobGroup</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">groupId</span><span class="p">:</span> <span class="nb">str</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Cancel active jobs for the specified group. See :meth:`SparkContext.setJobGroup`.</span>
<span class="sd"> for more information.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">sc</span><span class="p">()</span><span class="o">.</span><span class="n">cancelJobGroup</span><span class="p">(</span><span class="n">groupId</span><span class="p">)</span></div>
<div class="viewcode-block" id="SparkContext.cancelAllJobs"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.cancelAllJobs.html#pyspark.SparkContext.cancelAllJobs">[docs]</a> <span class="k">def</span> <span class="nf">cancelAllJobs</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Cancel all jobs that have been scheduled or are running.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">sc</span><span class="p">()</span><span class="o">.</span><span class="n">cancelAllJobs</span><span class="p">()</span></div>
<div class="viewcode-block" id="SparkContext.statusTracker"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.statusTracker.html#pyspark.SparkContext.statusTracker">[docs]</a> <span class="k">def</span> <span class="nf">statusTracker</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">StatusTracker</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Return :class:`StatusTracker` object</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">return</span> <span class="n">StatusTracker</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">statusTracker</span><span class="p">())</span></div>
<div class="viewcode-block" id="SparkContext.runJob"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.runJob.html#pyspark.SparkContext.runJob">[docs]</a> <span class="k">def</span> <span class="nf">runJob</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span>
<span class="n">rdd</span><span class="p">:</span> <span class="n">RDD</span><span class="p">[</span><span class="n">T</span><span class="p">],</span>
<span class="n">partitionFunc</span><span class="p">:</span> <span class="n">Callable</span><span class="p">[[</span><span class="n">Iterable</span><span class="p">[</span><span class="n">T</span><span class="p">]],</span> <span class="n">Iterable</span><span class="p">[</span><span class="n">U</span><span class="p">]],</span>
<span class="n">partitions</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Sequence</span><span class="p">[</span><span class="nb">int</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">allowLocal</span><span class="p">:</span> <span class="nb">bool</span> <span class="o">=</span> <span class="kc">False</span><span class="p">,</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="n">List</span><span class="p">[</span><span class="n">U</span><span class="p">]:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Executes the given partitionFunc on the specified set of partitions,</span>
<span class="sd"> returning the result as an array of elements.</span>
<span class="sd"> If &#39;partitions&#39; is not specified, this will run over all partitions.</span>
<span class="sd"> Examples</span>
<span class="sd"> --------</span>
<span class="sd"> &gt;&gt;&gt; myRDD = sc.parallelize(range(6), 3)</span>
<span class="sd"> &gt;&gt;&gt; sc.runJob(myRDD, lambda part: [x * x for x in part])</span>
<span class="sd"> [0, 1, 4, 9, 16, 25]</span>
<span class="sd"> &gt;&gt;&gt; myRDD = sc.parallelize(range(6), 3)</span>
<span class="sd"> &gt;&gt;&gt; sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)</span>
<span class="sd"> [0, 1, 16, 25]</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">if</span> <span class="n">partitions</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">partitions</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="nb">range</span><span class="p">(</span><span class="n">rdd</span><span class="o">.</span><span class="n">_jrdd</span><span class="o">.</span><span class="n">partitions</span><span class="p">()</span><span class="o">.</span><span class="n">size</span><span class="p">()))</span>
<span class="c1"># Implementation note: This is implemented as a mapPartitions followed</span>
<span class="c1"># by runJob() in order to avoid having to pass a Python lambda into</span>
<span class="c1"># SparkContext#runJob.</span>
<span class="n">mappedRDD</span> <span class="o">=</span> <span class="n">rdd</span><span class="o">.</span><span class="n">mapPartitions</span><span class="p">(</span><span class="n">partitionFunc</span><span class="p">)</span>
<span class="k">assert</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
<span class="n">sock_info</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jvm</span><span class="o">.</span><span class="n">PythonRDD</span><span class="o">.</span><span class="n">runJob</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">sc</span><span class="p">(),</span> <span class="n">mappedRDD</span><span class="o">.</span><span class="n">_jrdd</span><span class="p">,</span> <span class="n">partitions</span><span class="p">)</span>
<span class="k">return</span> <span class="nb">list</span><span class="p">(</span><span class="n">_load_from_socket</span><span class="p">(</span><span class="n">sock_info</span><span class="p">,</span> <span class="n">mappedRDD</span><span class="o">.</span><span class="n">_jrdd_deserializer</span><span class="p">))</span></div>
<div class="viewcode-block" id="SparkContext.show_profiles"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.show_profiles.html#pyspark.SparkContext.show_profiles">[docs]</a> <span class="k">def</span> <span class="nf">show_profiles</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;Print the profile stats to stdout&quot;&quot;&quot;</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">profiler_collector</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">profiler_collector</span><span class="o">.</span><span class="n">show_profiles</span><span class="p">()</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span>
<span class="s2">&quot;&#39;spark.python.profile&#39; configuration must be set &quot;</span>
<span class="s2">&quot;to &#39;true&#39; to enable Python profile.&quot;</span>
<span class="p">)</span></div>
<div class="viewcode-block" id="SparkContext.dump_profiles"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.dump_profiles.html#pyspark.SparkContext.dump_profiles">[docs]</a> <span class="k">def</span> <span class="nf">dump_profiles</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">path</span><span class="p">:</span> <span class="nb">str</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;Dump the profile stats into directory `path`&quot;&quot;&quot;</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">profiler_collector</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">profiler_collector</span><span class="o">.</span><span class="n">dump_profiles</span><span class="p">(</span><span class="n">path</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span>
<span class="s2">&quot;&#39;spark.python.profile&#39; configuration must be set &quot;</span>
<span class="s2">&quot;to &#39;true&#39; to enable Python profile.&quot;</span>
<span class="p">)</span></div>
<div class="viewcode-block" id="SparkContext.getConf"><a class="viewcode-back" href="../../reference/api/pyspark.SparkContext.getConf.html#pyspark.SparkContext.getConf">[docs]</a> <span class="k">def</span> <span class="nf">getConf</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">SparkConf</span><span class="p">:</span>
<span class="n">conf</span> <span class="o">=</span> <span class="n">SparkConf</span><span class="p">()</span>
<span class="n">conf</span><span class="o">.</span><span class="n">setAll</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">_conf</span><span class="o">.</span><span class="n">getAll</span><span class="p">())</span>
<span class="k">return</span> <span class="n">conf</span></div>
<span class="nd">@property</span>
<span class="k">def</span> <span class="nf">resources</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">ResourceInformation</span><span class="p">]:</span>
<span class="n">resources</span> <span class="o">=</span> <span class="p">{}</span>
<span class="n">jresources</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_jsc</span><span class="o">.</span><span class="n">resources</span><span class="p">()</span>
<span class="k">for</span> <span class="n">x</span> <span class="ow">in</span> <span class="n">jresources</span><span class="p">:</span>
<span class="n">name</span> <span class="o">=</span> <span class="n">jresources</span><span class="p">[</span><span class="n">x</span><span class="p">]</span><span class="o">.</span><span class="n">name</span><span class="p">()</span>
<span class="n">jaddresses</span> <span class="o">=</span> <span class="n">jresources</span><span class="p">[</span><span class="n">x</span><span class="p">]</span><span class="o">.</span><span class="n">addresses</span><span class="p">()</span>
<span class="n">addrs</span> <span class="o">=</span> <span class="p">[</span><span class="n">addr</span> <span class="k">for</span> <span class="n">addr</span> <span class="ow">in</span> <span class="n">jaddresses</span><span class="p">]</span>
<span class="n">resources</span><span class="p">[</span><span class="n">name</span><span class="p">]</span> <span class="o">=</span> <span class="n">ResourceInformation</span><span class="p">(</span><span class="n">name</span><span class="p">,</span> <span class="n">addrs</span><span class="p">)</span>
<span class="k">return</span> <span class="n">resources</span>
<span class="nd">@staticmethod</span>
<span class="k">def</span> <span class="nf">_assert_on_driver</span><span class="p">()</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Called to ensure that SparkContext is created only on the Driver.</span>
<span class="sd"> Throws an exception if a SparkContext is about to be created in executors.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">if</span> <span class="n">TaskContext</span><span class="o">.</span><span class="n">get</span><span class="p">()</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span><span class="s2">&quot;SparkContext should only be created and accessed on the driver.&quot;</span><span class="p">)</span></div>
<span class="k">def</span> <span class="nf">_test</span><span class="p">()</span> <span class="o">-&gt;</span> <span class="kc">None</span><span class="p">:</span>
<span class="kn">import</span> <span class="nn">atexit</span>
<span class="kn">import</span> <span class="nn">doctest</span>
<span class="kn">import</span> <span class="nn">tempfile</span>
<span class="n">globs</span> <span class="o">=</span> <span class="nb">globals</span><span class="p">()</span><span class="o">.</span><span class="n">copy</span><span class="p">()</span>
<span class="n">globs</span><span class="p">[</span><span class="s2">&quot;sc&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="n">SparkContext</span><span class="p">(</span><span class="s2">&quot;local[4]&quot;</span><span class="p">,</span> <span class="s2">&quot;PythonTest&quot;</span><span class="p">)</span>
<span class="n">globs</span><span class="p">[</span><span class="s2">&quot;tempdir&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="n">tempfile</span><span class="o">.</span><span class="n">mkdtemp</span><span class="p">()</span>
<span class="n">atexit</span><span class="o">.</span><span class="n">register</span><span class="p">(</span><span class="k">lambda</span><span class="p">:</span> <span class="n">shutil</span><span class="o">.</span><span class="n">rmtree</span><span class="p">(</span><span class="n">globs</span><span class="p">[</span><span class="s2">&quot;tempdir&quot;</span><span class="p">]))</span>
<span class="p">(</span><span class="n">failure_count</span><span class="p">,</span> <span class="n">test_count</span><span class="p">)</span> <span class="o">=</span> <span class="n">doctest</span><span class="o">.</span><span class="n">testmod</span><span class="p">(</span><span class="n">globs</span><span class="o">=</span><span class="n">globs</span><span class="p">,</span> <span class="n">optionflags</span><span class="o">=</span><span class="n">doctest</span><span class="o">.</span><span class="n">ELLIPSIS</span><span class="p">)</span>
<span class="n">globs</span><span class="p">[</span><span class="s2">&quot;sc&quot;</span><span class="p">]</span><span class="o">.</span><span class="n">stop</span><span class="p">()</span>
<span class="k">if</span> <span class="n">failure_count</span><span class="p">:</span>
<span class="n">sys</span><span class="o">.</span><span class="n">exit</span><span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">)</span>
<span class="k">if</span> <span class="vm">__name__</span> <span class="o">==</span> <span class="s2">&quot;__main__&quot;</span><span class="p">:</span>
<span class="n">_test</span><span class="p">()</span>
</pre></div>
</div>
</main>
</div>
</div>
<script src="../../_static/js/index.3da636dd464baa7582d2.js"></script>
<footer class="footer mt-5 mt-md-0">
<div class="container">
<p>
&copy; Copyright .<br/>
Created using <a href="http://sphinx-doc.org/">Sphinx</a> 3.0.4.<br/>
</p>
</div>
</footer>
</body>
</html>