<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<title>CUDA Integration — Apache Arrow v3.0.0</title>
<link rel="canonical" href="https://arrow.apache.org/docs/python/cuda.html" />
</head>
<body>
| <div class="section" id="cuda-integration"> |
| <h1>CUDA Integration<a class="headerlink" href="#cuda-integration" title="Permalink to this headline">¶</a></h1> |
| <p>Arrow is not limited to CPU buffers (located in the computer’s main memory, |
| also named “host memory”). It also has provisions for accessing buffers |
| located on a CUDA-capable GPU device (in “device memory”).</p> |
| <div class="admonition note"> |
| <p class="admonition-title">Note</p> |
| <p>This functionality is optional and must have been enabled at build time. |
| If this is not done by your package manager, you might have to build Arrow |
| yourself.</p> |
| </div> |
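<p>A quick way to check whether your pyarrow build includes CUDA support is
simply to try the import (a minimal sketch; a build without CUDA support
simply lacks the module):</p>
<pre>
# If pyarrow was built without CUDA support, importing the cuda
# submodule raises ImportError.
try:
    from pyarrow import cuda
except ImportError:
    print("this pyarrow build does not include CUDA support")
</pre>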
| <div class="section" id="cuda-contexts"> |
| <h2>CUDA Contexts<a class="headerlink" href="#cuda-contexts" title="Permalink to this headline">¶</a></h2> |
| <p>A CUDA context represents access to a particular CUDA-capable device. |
| For example, this is creating a CUDA context accessing CUDA device number 0:</p> |
| <div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="kn">from</span> <span class="nn">pyarrow</span> <span class="k">import</span> <span class="n">cuda</span> |
| <span class="gp">>>> </span><span class="n">ctx</span> <span class="o">=</span> <span class="n">cuda</span><span class="o">.</span><span class="n">Context</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span> |
| <span class="go">>>></span> |
| </pre></div> |
| </div> |
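<p>A machine may have several CUDA devices, each addressed by its own context.
The sketch below assumes the <code>Context.get_num_devices()</code> static
method is available in your build:</p>
<pre>
# One context per visible CUDA device (a sketch, not from the original page).
from pyarrow import cuda

n_devices = cuda.Context.get_num_devices()
contexts = [cuda.Context(i) for i in range(n_devices)]
</pre>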
</div>
| <div class="section" id="cuda-buffers"> |
| <h2>CUDA Buffers<a class="headerlink" href="#cuda-buffers" title="Permalink to this headline">¶</a></h2> |
| <p>A CUDA buffer can be created by copying data from host memory to the memory |
| of a CUDA device, using the <a class="reference internal" href="generated/pyarrow.cuda.Context.html#pyarrow.cuda.Context.buffer_from_data" title="pyarrow.cuda.Context.buffer_from_data"><code class="xref py py-meth docutils literal notranslate"><span class="pre">Context.buffer_from_data()</span></code></a> method. |
| The source data can be any Python buffer-like object, including Arrow buffers:</p> |
| <div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span> |
| <span class="gp">>>> </span><span class="n">arr</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">arange</span><span class="p">(</span><span class="mi">4</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">np</span><span class="o">.</span><span class="n">int32</span><span class="p">)</span> |
| <span class="gp">>>> </span><span class="n">arr</span><span class="o">.</span><span class="n">nbytes</span> |
| <span class="go">16</span> |
| <span class="gp">>>> </span><span class="n">cuda_buf</span> <span class="o">=</span> <span class="n">ctx</span><span class="o">.</span><span class="n">buffer_from_data</span><span class="p">(</span><span class="n">arr</span><span class="p">)</span> |
| <span class="gp">>>> </span><span class="nb">type</span><span class="p">(</span><span class="n">cuda_buf</span><span class="p">)</span> |
| <span class="go">pyarrow._cuda.CudaBuffer</span> |
| <span class="gp">>>> </span><span class="n">cuda_buf</span><span class="o">.</span><span class="n">size</span> <span class="c1"># The buffer's size in bytes</span> |
| <span class="go">16</span> |
| <span class="gp">>>> </span><span class="n">cuda_buf</span><span class="o">.</span><span class="n">address</span> <span class="c1"># The buffer's address in device memory</span> |
| <span class="go">30088364544</span> |
| <span class="gp">>>> </span><span class="n">cuda_buf</span><span class="o">.</span><span class="n">context</span><span class="o">.</span><span class="n">device_number</span> |
| <span class="go">0</span> |
| </pre></div> |
| </div> |
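<p>Device memory can also be allocated first and filled afterwards. The sketch
below assumes the <code>Context.new_buffer()</code> and
<code>CudaBuffer.copy_from_host()</code> methods behave as described in the
pyarrow.cuda API reference:</p>
<pre>
# Continuing the example above (reusing ctx and arr): allocate
# uninitialized device memory, then copy host data into it.
dev_buf = ctx.new_buffer(arr.nbytes)
dev_buf.copy_from_host(arr)  # by default, copies the whole host buffer
</pre>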
<p>Conversely, you can copy a CUDA buffer back to host memory, getting a regular
CPU buffer:</p>
| <div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">buf</span> <span class="o">=</span> <span class="n">cuda_buf</span><span class="o">.</span><span class="n">copy_to_host</span><span class="p">()</span> |
| <span class="gp">>>> </span><span class="nb">type</span><span class="p">(</span><span class="n">buf</span><span class="p">)</span> |
| <span class="go">pyarrow.lib.Buffer</span> |
| <span class="gp">>>> </span><span class="n">np</span><span class="o">.</span><span class="n">frombuffer</span><span class="p">(</span><span class="n">buf</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">np</span><span class="o">.</span><span class="n">int32</span><span class="p">)</span> |
| <span class="go">array([0, 1, 2, 3], dtype=int32)</span> |
| </pre></div> |
| </div> |
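<p><code>copy_to_host()</code> can also bring back just part of the device
buffer. This sketch assumes its <code>position</code> and <code>nbytes</code>
parameters work as documented in the pyarrow.cuda API reference:</p>
<pre>
# Copy back only the last two int32 values (bytes 8 through 15).
tail = cuda_buf.copy_to_host(position=8, nbytes=8)
np.frombuffer(tail, dtype=np.int32)  # array([2, 3], dtype=int32)
</pre>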
| <div class="admonition warning"> |
| <p class="admonition-title">Warning</p> |
| <p>Many Arrow functions expect a CPU buffer but will not check the buffer’s |
| actual type. You will get a crash if you pass a CUDA buffer to such a |
| function:</p> |
| <div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">pa</span><span class="o">.</span><span class="n">py_buffer</span><span class="p">(</span><span class="sa">b</span><span class="s2">"x"</span> <span class="o">*</span> <span class="mi">16</span><span class="p">)</span><span class="o">.</span><span class="n">equals</span><span class="p">(</span><span class="n">cuda_buf</span><span class="p">)</span> |
| <span class="go">Segmentation fault</span> |
| </pre></div> |
| </div> |
| </div> |
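<p>One defensive pattern (a sketch, not part of the library) is to check a
buffer’s concrete type before handing it to CPU-only code:</p>
<pre>
from pyarrow import cuda

def ensure_cpu_buffer(buf):
    """Raise instead of crashing when given a device buffer."""
    if isinstance(buf, cuda.CudaBuffer):
        raise TypeError("expected a CPU buffer, got a CUDA device buffer")
    return buf
</pre>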
</div>
| <div class="section" id="numba-integration"> |
| <h2>Numba Integration<a class="headerlink" href="#numba-integration" title="Permalink to this headline">¶</a></h2> |
| <p>There is not much you can do directly with Arrow CUDA buffers from Python, |
| but they support interoperation with <a class="reference external" href="https://numba.pydata.org/">Numba</a>, |
| a JIT compiler which can turn Python code into optimized CUDA kernels.</p> |
| <div class="section" id="arrow-to-numba"> |
| <h3>Arrow to Numba<a class="headerlink" href="#arrow-to-numba" title="Permalink to this headline">¶</a></h3> |
| <p>First let’s define a Numba CUDA kernel operating on an <code class="docutils literal notranslate"><span class="pre">int32</span></code> array. Here, |
| we will simply increment each array element (assuming the array is writable):</p> |
| <div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="kn">import</span> <span class="nn">numba.cuda</span> |
| |
| <span class="nd">@numba</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">jit</span> |
| <span class="k">def</span> <span class="nf">increment_by_one</span><span class="p">(</span><span class="n">an_array</span><span class="p">):</span> |
| <span class="n">pos</span> <span class="o">=</span> <span class="n">numba</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">grid</span><span class="p">(</span><span class="mi">1</span><span class="p">)</span> |
| <span class="k">if</span> <span class="n">pos</span> <span class="o"><</span> <span class="n">an_array</span><span class="o">.</span><span class="n">size</span><span class="p">:</span> |
| <span class="n">an_array</span><span class="p">[</span><span class="n">pos</span><span class="p">]</span> <span class="o">+=</span> <span class="mi">1</span> |
| </pre></div> |
| </div> |
<p>Then we need to wrap our CUDA buffer into a Numba “device array” with the right
array metadata (shape, strides and datatype). This is necessary so that Numba
can identify the array’s characteristics and compile the kernel with the
appropriate type declarations.</p>
<p>In this case the metadata can simply be taken from the original NumPy array.
Note that the GPU data isn’t copied, only pointed to:</p>
| <div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="kn">from</span> <span class="nn">numba.cuda.cudadrv.devicearray</span> <span class="k">import</span> <span class="n">DeviceNDArray</span> |
| <span class="gp">>>> </span><span class="n">device_arr</span> <span class="o">=</span> <span class="n">DeviceNDArray</span><span class="p">(</span><span class="n">arr</span><span class="o">.</span><span class="n">shape</span><span class="p">,</span> <span class="n">arr</span><span class="o">.</span><span class="n">strides</span><span class="p">,</span> <span class="n">arr</span><span class="o">.</span><span class="n">dtype</span><span class="p">,</span> <span class="n">gpu_data</span><span class="o">=</span><span class="n">cuda_buf</span><span class="o">.</span><span class="n">to_numba</span><span class="p">())</span> |
| </pre></div> |
| </div> |
<p>(Ideally we could have defined an Arrow array in CPU memory, copied it to CUDA
memory without losing type information, and then invoked the Numba kernel on it
without constructing the DeviceNDArray by hand; this is not yet possible.)</p>
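<p>In the meantime, the wrapping boilerplate can be hidden behind a small
helper for one-dimensional buffers (a hypothetical convenience function, not
part of pyarrow; it reuses the <code>np</code> and <code>DeviceNDArray</code>
imports from above):</p>
<pre>
def device_array_from_cuda_buffer(cuda_buf, dtype):
    """Wrap a 1-D CudaBuffer as a Numba device array (hypothetical helper)."""
    dtype = np.dtype(dtype)
    shape = (cuda_buf.size // dtype.itemsize,)
    strides = (dtype.itemsize,)
    return DeviceNDArray(shape, strides, dtype, gpu_data=cuda_buf.to_numba())
</pre>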
<p>Finally we can run the Numba CUDA kernel on the Numba device array (here
launched with 16 blocks of 16 threads each, far more than enough for our
4-element array):</p>
| <div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">increment_by_one</span><span class="p">[</span><span class="mi">16</span><span class="p">,</span> <span class="mi">16</span><span class="p">](</span><span class="n">device_arr</span><span class="p">)</span> |
| </pre></div> |
| </div> |
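<p>Hard-coding the launch configuration works for this toy example; for real
data you would typically derive it from the array size. The sketch below is
standard Numba practice, not something specific to Arrow (the launch itself is
commented out, since running it would increment the array a second time):</p>
<pre>
threads_per_block = 32
blocks_per_grid = (device_arr.size + threads_per_block - 1) // threads_per_block
# increment_by_one[blocks_per_grid, threads_per_block](device_arr)
</pre>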
<p>And the results can be checked by copying the CUDA buffer back to CPU memory:</p>
| <div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">np</span><span class="o">.</span><span class="n">frombuffer</span><span class="p">(</span><span class="n">cuda_buf</span><span class="o">.</span><span class="n">copy_to_host</span><span class="p">(),</span> <span class="n">dtype</span><span class="o">=</span><span class="n">np</span><span class="o">.</span><span class="n">int32</span><span class="p">)</span> |
| <span class="go">array([1, 2, 3, 4], dtype=int32)</span> |
| </pre></div> |
| </div> |
| </div> |
| <div class="section" id="numba-to-arrow"> |
| <h3>Numba to Arrow<a class="headerlink" href="#numba-to-arrow" title="Permalink to this headline">¶</a></h3> |
| <p>Conversely, a Numba-created device array can be viewed as an Arrow CUDA buffer, |
| using the <a class="reference internal" href="generated/pyarrow.cuda.CudaBuffer.html#pyarrow.cuda.CudaBuffer.from_numba" title="pyarrow.cuda.CudaBuffer.from_numba"><code class="xref py py-meth docutils literal notranslate"><span class="pre">CudaBuffer.from_numba()</span></code></a> factory method.</p> |
<p>For the sake of example, let’s first create a Numba device array:</p>
| <div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">arr</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">arange</span><span class="p">(</span><span class="mi">10</span><span class="p">,</span> <span class="mi">14</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">np</span><span class="o">.</span><span class="n">int32</span><span class="p">)</span> |
| <span class="gp">>>> </span><span class="n">arr</span> |
| <span class="go">array([10, 11, 12, 13], dtype=int32)</span> |
| <span class="gp">>>> </span><span class="n">device_arr</span> <span class="o">=</span> <span class="n">numba</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">to_device</span><span class="p">(</span><span class="n">arr</span><span class="p">)</span> |
| </pre></div> |
| </div> |
<p>Then we can create a CUDA buffer pointing to the device array’s memory.
We don’t need to pass a CUDA context explicitly this time: the appropriate
CUDA context is automatically retrieved and adapted from the Numba object.</p>
| <div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">cuda_buf</span> <span class="o">=</span> <span class="n">cuda</span><span class="o">.</span><span class="n">CudaBuffer</span><span class="o">.</span><span class="n">from_numba</span><span class="p">(</span><span class="n">device_arr</span><span class="o">.</span><span class="n">gpu_data</span><span class="p">)</span> |
| <span class="gp">>>> </span><span class="n">cuda_buf</span><span class="o">.</span><span class="n">size</span> |
| <span class="go">16</span> |
| <span class="gp">>>> </span><span class="n">cuda_buf</span><span class="o">.</span><span class="n">address</span> |
| <span class="go">30088364032</span> |
| <span class="gp">>>> </span><span class="n">cuda_buf</span><span class="o">.</span><span class="n">context</span><span class="o">.</span><span class="n">device_number</span> |
| <span class="go">0</span> |
| </pre></div> |
| </div> |
<p>Of course, we can copy the CUDA buffer back to host memory:</p>
| <div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="n">np</span><span class="o">.</span><span class="n">frombuffer</span><span class="p">(</span><span class="n">cuda_buf</span><span class="o">.</span><span class="n">copy_to_host</span><span class="p">(),</span> <span class="n">dtype</span><span class="o">=</span><span class="n">np</span><span class="o">.</span><span class="n">int32</span><span class="p">)</span> |
| <span class="go">array([10, 11, 12, 13], dtype=int32)</span> |
| </pre></div> |
| </div> |
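<p>Because <code>from_numba()</code> shares the underlying device memory rather
than copying it, changes made through the Numba device array remain visible
through the Arrow buffer. A short sketch, reusing the
<code>increment_by_one</code> kernel defined earlier:</p>
<pre>
increment_by_one[1, 32](device_arr)
np.frombuffer(cuda_buf.copy_to_host(), dtype=np.int32)
# -> array([11, 12, 13, 14], dtype=int32)
</pre>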
| <div class="admonition seealso"> |
| <p class="admonition-title">See also</p> |
| <p>Documentation for Numba’s <a class="reference external" href="https://numba.pydata.org/numba-doc/latest/cuda/index.html">CUDA support</a>.</p> |
| </div> |
| </div> |
</div>
</div>
</body>
</html>