Source code for apache_beam.ml.inference.pytorch_inference

#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# pytype: skip-file

import logging
from collections import defaultdict
from typing import Any
from typing import Callable
from typing import Dict
from typing import Iterable
from typing import Optional
from typing import Sequence

import torch
from apache_beam.io.filesystems import FileSystems
from apache_beam.ml.inference import utils
from apache_beam.ml.inference.base import ModelHandler
from apache_beam.ml.inference.base import PredictionResult
from apache_beam.utils.annotations import experimental

__all__ = [
    'PytorchModelHandlerTensor',
    'PytorchModelHandlerKeyedTensor',
]
<span class="n">TensorInferenceFn</span> <span class="o">=</span> <span class="n">Callable</span><span class="p">[[</span>
<span class="n">Sequence</span><span class="p">[</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">],</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">,</span>
<span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">,</span>
<span class="n">Optional</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">Any</span><span class="p">]],</span>
<span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span>
<span class="p">],</span>
<span class="n">Iterable</span><span class="p">[</span><span class="n">PredictionResult</span><span class="p">]]</span>
<span class="n">KeyedTensorInferenceFn</span> <span class="o">=</span> <span class="n">Callable</span><span class="p">[[</span>
<span class="n">Sequence</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">]],</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">,</span>
<span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">,</span>
<span class="n">Optional</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">Any</span><span class="p">]],</span>
<span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span>
<span class="p">],</span>
<span class="n">Iterable</span><span class="p">[</span><span class="n">PredictionResult</span><span class="p">]]</span>
<span class="k">def</span> <span class="nf">_validate_constructor_args</span><span class="p">(</span>
<span class="n">state_dict_path</span><span class="p">,</span> <span class="n">model_class</span><span class="p">,</span> <span class="n">torch_script_model_path</span><span class="p">):</span>
<span class="n">message</span> <span class="o">=</span> <span class="p">(</span>
<span class="s2">&quot;A </span><span class="si">{param1}</span><span class="s2"> has been supplied to the model &quot;</span>
<span class="s2">&quot;handler, but the required </span><span class="si">{param2}</span><span class="s2"> is missing. &quot;</span>
<span class="s2">&quot;Please provide the </span><span class="si">{param2}</span><span class="s2"> in order to &quot;</span>
<span class="s2">&quot;successfully load the </span><span class="si">{param1}</span><span class="s2">.&quot;</span><span class="p">)</span>
<span class="c1"># state_dict_path and model_class are coupled with each other</span>
<span class="c1"># raise RuntimeError if user forgets to pass any one of them.</span>
<span class="k">if</span> <span class="n">state_dict_path</span> <span class="ow">and</span> <span class="ow">not</span> <span class="n">model_class</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span>
<span class="n">message</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">param1</span><span class="o">=</span><span class="s2">&quot;state_dict_path&quot;</span><span class="p">,</span> <span class="n">param2</span><span class="o">=</span><span class="s2">&quot;model_class&quot;</span><span class="p">))</span>
<span class="k">if</span> <span class="ow">not</span> <span class="n">state_dict_path</span> <span class="ow">and</span> <span class="n">model_class</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span>
<span class="n">message</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">param1</span><span class="o">=</span><span class="s2">&quot;model_class&quot;</span><span class="p">,</span> <span class="n">param2</span><span class="o">=</span><span class="s2">&quot;state_dict_path&quot;</span><span class="p">))</span>
<span class="k">if</span> <span class="n">torch_script_model_path</span> <span class="ow">and</span> <span class="n">state_dict_path</span><span class="p">:</span>
<span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span>
<span class="s2">&quot;Please specify either torch_script_model_path or &quot;</span>
<span class="s2">&quot;(state_dict_path, model_class) to successfully load the model.&quot;</span><span class="p">)</span>
<span class="k">def</span> <span class="nf">_load_model</span><span class="p">(</span>
<span class="n">model_class</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Callable</span><span class="p">[</span><span class="o">...</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">]],</span>
<span class="n">state_dict_path</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">],</span>
<span class="n">device</span><span class="p">:</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">,</span>
<span class="n">model_params</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">Any</span><span class="p">]],</span>
<span class="n">torch_script_model_path</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]):</span>
<span class="k">if</span> <span class="n">device</span> <span class="o">==</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s1">&#39;cuda&#39;</span><span class="p">)</span> <span class="ow">and</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">is_available</span><span class="p">():</span>
<span class="n">logging</span><span class="o">.</span><span class="n">warning</span><span class="p">(</span>
<span class="s2">&quot;Model handler specified a &#39;GPU&#39; device, but GPUs are not available. &quot;</span>
<span class="s2">&quot;Switching to CPU.&quot;</span><span class="p">)</span>
<span class="n">device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s1">&#39;cpu&#39;</span><span class="p">)</span>
<span class="k">try</span><span class="p">:</span>
<span class="n">logging</span><span class="o">.</span><span class="n">info</span><span class="p">(</span>
<span class="s2">&quot;Loading state_dict_path </span><span class="si">%s</span><span class="s2"> onto a </span><span class="si">%s</span><span class="s2"> device&quot;</span><span class="p">,</span> <span class="n">state_dict_path</span><span class="p">,</span> <span class="n">device</span><span class="p">)</span>
<span class="k">if</span> <span class="ow">not</span> <span class="n">torch_script_model_path</span><span class="p">:</span>
<span class="n">file</span> <span class="o">=</span> <span class="n">FileSystems</span><span class="o">.</span><span class="n">open</span><span class="p">(</span><span class="n">state_dict_path</span><span class="p">,</span> <span class="s1">&#39;rb&#39;</span><span class="p">)</span>
<span class="n">model</span> <span class="o">=</span> <span class="n">model_class</span><span class="p">(</span><span class="o">**</span><span class="n">model_params</span><span class="p">)</span> <span class="c1"># type: ignore[arg-type,misc]</span>
<span class="n">state_dict</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">load</span><span class="p">(</span><span class="n">file</span><span class="p">,</span> <span class="n">map_location</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
<span class="n">model</span><span class="o">.</span><span class="n">load_state_dict</span><span class="p">(</span><span class="n">state_dict</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">file</span> <span class="o">=</span> <span class="n">FileSystems</span><span class="o">.</span><span class="n">open</span><span class="p">(</span><span class="n">torch_script_model_path</span><span class="p">,</span> <span class="s1">&#39;rb&#39;</span><span class="p">)</span>
<span class="n">model</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">load</span><span class="p">(</span><span class="n">file</span><span class="p">,</span> <span class="n">map_location</span><span class="o">=</span><span class="n">device</span><span class="p">)</span>
<span class="k">except</span> <span class="ne">RuntimeError</span> <span class="k">as</span> <span class="n">e</span><span class="p">:</span>
<span class="k">if</span> <span class="n">device</span> <span class="o">==</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s1">&#39;cuda&#39;</span><span class="p">):</span>
<span class="n">message</span> <span class="o">=</span> <span class="s2">&quot;Loading the model onto a GPU device failed due to an &quot;</span> \
<span class="sa">f</span><span class="s2">&quot;exception:</span><span class="se">\n</span><span class="si">{</span><span class="n">e</span><span class="si">}</span><span class="se">\n</span><span class="s2">Attempting to load onto a CPU device instead.&quot;</span>
<span class="n">logging</span><span class="o">.</span><span class="n">warning</span><span class="p">(</span><span class="n">message</span><span class="p">)</span>
<span class="k">return</span> <span class="n">_load_model</span><span class="p">(</span>
<span class="n">model_class</span><span class="p">,</span>
<span class="n">state_dict_path</span><span class="p">,</span>
<span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s1">&#39;cpu&#39;</span><span class="p">),</span>
<span class="n">model_params</span><span class="p">,</span>
<span class="n">torch_script_model_path</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="k">raise</span> <span class="n">e</span>
<span class="n">model</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="n">model</span><span class="o">.</span><span class="n">eval</span><span class="p">()</span>
<span class="n">logging</span><span class="o">.</span><span class="n">info</span><span class="p">(</span><span class="s2">&quot;Finished loading PyTorch model.&quot;</span><span class="p">)</span>
<span class="k">return</span> <span class="n">model</span><span class="p">,</span> <span class="n">device</span>
<span class="k">def</span> <span class="nf">_convert_to_device</span><span class="p">(</span><span class="n">examples</span><span class="p">:</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">,</span> <span class="n">device</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">:</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Converts samples to a style matching given device.</span>
<span class="sd"> **NOTE:** A user may pass in device=&#39;GPU&#39; but if GPU is not detected in the</span>
<span class="sd"> environment it must be converted back to CPU.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">if</span> <span class="n">examples</span><span class="o">.</span><span class="n">device</span> <span class="o">!=</span> <span class="n">device</span><span class="p">:</span>
<span class="n">examples</span> <span class="o">=</span> <span class="n">examples</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="p">)</span>
<span class="k">return</span> <span class="n">examples</span>
<span class="k">def</span> <span class="nf">default_tensor_inference_fn</span><span class="p">(</span>
<span class="n">batch</span><span class="p">:</span> <span class="n">Sequence</span><span class="p">[</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">],</span>
<span class="n">model</span><span class="p">:</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">,</span>
<span class="n">device</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
<span class="n">inference_args</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">Any</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">model_id</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="n">Iterable</span><span class="p">[</span><span class="n">PredictionResult</span><span class="p">]:</span>
<span class="c1"># torch.no_grad() mitigates GPU memory issues</span>
<span class="c1"># https://github.com/apache/beam/issues/22811</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">no_grad</span><span class="p">():</span>
<span class="n">batched_tensors</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">stack</span><span class="p">(</span><span class="n">batch</span><span class="p">)</span>
<span class="n">batched_tensors</span> <span class="o">=</span> <span class="n">_convert_to_device</span><span class="p">(</span><span class="n">batched_tensors</span><span class="p">,</span> <span class="n">device</span><span class="p">)</span>
<span class="n">predictions</span> <span class="o">=</span> <span class="n">model</span><span class="p">(</span><span class="n">batched_tensors</span><span class="p">,</span> <span class="o">**</span><span class="n">inference_args</span><span class="p">)</span>
<span class="k">return</span> <span class="n">utils</span><span class="o">.</span><span class="n">_convert_to_result</span><span class="p">(</span><span class="n">batch</span><span class="p">,</span> <span class="n">predictions</span><span class="p">,</span> <span class="n">model_id</span><span class="p">)</span>
<span class="k">def</span> <span class="nf">make_tensor_model_fn</span><span class="p">(</span><span class="n">model_fn</span><span class="p">:</span> <span class="nb">str</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">TensorInferenceFn</span><span class="p">:</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Produces a TensorInferenceFn that uses a method of the model other that</span>
<span class="sd"> the forward() method.</span>
<span class="sd"> Args:</span>
<span class="sd"> model_fn: A string name of the method to be used. This is accessed through</span>
<span class="sd"> getattr(model, model_fn)</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">def</span> <span class="nf">attr_fn</span><span class="p">(</span>
<span class="n">batch</span><span class="p">:</span> <span class="n">Sequence</span><span class="p">[</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">],</span>
<span class="n">model</span><span class="p">:</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">,</span>
<span class="n">device</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
<span class="n">inference_args</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">Any</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">model_id</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="n">Iterable</span><span class="p">[</span><span class="n">PredictionResult</span><span class="p">]:</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">no_grad</span><span class="p">():</span>
<span class="n">batched_tensors</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">stack</span><span class="p">(</span><span class="n">batch</span><span class="p">)</span>
<span class="n">batched_tensors</span> <span class="o">=</span> <span class="n">_convert_to_device</span><span class="p">(</span><span class="n">batched_tensors</span><span class="p">,</span> <span class="n">device</span><span class="p">)</span>
<span class="n">pred_fn</span> <span class="o">=</span> <span class="nb">getattr</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">model_fn</span><span class="p">)</span>
<span class="n">predictions</span> <span class="o">=</span> <span class="n">pred_fn</span><span class="p">(</span><span class="n">batched_tensors</span><span class="p">,</span> <span class="o">**</span><span class="n">inference_args</span><span class="p">)</span>
<span class="k">return</span> <span class="n">utils</span><span class="o">.</span><span class="n">_convert_to_result</span><span class="p">(</span><span class="n">batch</span><span class="p">,</span> <span class="n">predictions</span><span class="p">,</span> <span class="n">model_id</span><span class="p">)</span>
<span class="k">return</span> <span class="n">attr_fn</span>
<div class="viewcode-block" id="PytorchModelHandlerTensor"><a class="viewcode-back" href="../../../../apache_beam.ml.inference.pytorch_inference.html#apache_beam.ml.inference.pytorch_inference.PytorchModelHandlerTensor">[docs]</a><span class="k">class</span> <span class="nc">PytorchModelHandlerTensor</span><span class="p">(</span><span class="n">ModelHandler</span><span class="p">[</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">,</span>
<span class="n">PredictionResult</span><span class="p">,</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">]):</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span>
<span class="n">state_dict_path</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">model_class</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Callable</span><span class="p">[</span><span class="o">...</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">model_params</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">Any</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">device</span><span class="p">:</span> <span class="nb">str</span> <span class="o">=</span> <span class="s1">&#39;CPU&#39;</span><span class="p">,</span>
<span class="o">*</span><span class="p">,</span>
<span class="n">inference_fn</span><span class="p">:</span> <span class="n">TensorInferenceFn</span> <span class="o">=</span> <span class="n">default_tensor_inference_fn</span><span class="p">,</span>
<span class="n">torch_script_model_path</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">min_batch_size</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">max_batch_size</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">):</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;Implementation of the ModelHandler interface for PyTorch.</span>
<span class="sd"> Example Usage for torch model::</span>
<span class="sd"> pcoll | RunInference(PytorchModelHandlerTensor(state_dict_path=&quot;my_uri&quot;,</span>
<span class="sd"> model_class=&quot;my_class&quot;))</span>
<span class="sd"> Example Usage for torchscript model::</span>
<span class="sd"> pcoll | RunInference(PytorchModelHandlerTensor(</span>
<span class="sd"> torch_script_model_path=&quot;my_uri&quot;))</span>
<span class="sd"> See https://pytorch.org/tutorials/beginner/saving_loading_models.html</span>
<span class="sd"> for details</span>
<span class="sd"> Args:</span>
<span class="sd"> state_dict_path: path to the saved dictionary of the model state.</span>
<span class="sd"> model_class: class of the Pytorch model that defines the model</span>
<span class="sd"> structure.</span>
<span class="sd"> model_params: A dictionary of arguments required to instantiate the model</span>
<span class="sd"> class.</span>
<span class="sd"> device: the device on which you wish to run the model. If</span>
<span class="sd"> ``device = GPU`` then a GPU device will be used if it is available.</span>
<span class="sd"> Otherwise, it will be CPU.</span>
<span class="sd"> inference_fn: the inference function to use during RunInference.</span>
<span class="sd"> default=_default_tensor_inference_fn</span>
<span class="sd"> torch_script_model_path: Path to the torch script model.</span>
<span class="sd"> the model will be loaded using `torch.jit.load()`.</span>
<span class="sd"> `state_dict_path`, `model_class` and `model_params`</span>
<span class="sd"> arguments will be disregarded.</span>
<span class="sd"> min_batch_size: the minimum batch size to use when batching inputs. This</span>
<span class="sd"> batch will be fed into the inference_fn as a Sequence of Tensors.</span>
<span class="sd"> max_batch_size: the maximum batch size to use when batching inputs. This</span>
<span class="sd"> batch will be fed into the inference_fn as a Sequence of Tensors.</span>
<span class="sd"> **Supported Versions:** RunInference APIs in Apache Beam have been tested</span>
<span class="sd"> with PyTorch 1.9 and 1.10.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_state_dict_path</span> <span class="o">=</span> <span class="n">state_dict_path</span>
<span class="k">if</span> <span class="n">device</span> <span class="o">==</span> <span class="s1">&#39;GPU&#39;</span><span class="p">:</span>
<span class="n">logging</span><span class="o">.</span><span class="n">info</span><span class="p">(</span><span class="s2">&quot;Device is set to CUDA&quot;</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s1">&#39;cuda&#39;</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">logging</span><span class="o">.</span><span class="n">info</span><span class="p">(</span><span class="s2">&quot;Device is set to CPU&quot;</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s1">&#39;cpu&#39;</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_model_class</span> <span class="o">=</span> <span class="n">model_class</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_model_params</span> <span class="o">=</span> <span class="n">model_params</span> <span class="k">if</span> <span class="n">model_params</span> <span class="k">else</span> <span class="p">{}</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_inference_fn</span> <span class="o">=</span> <span class="n">inference_fn</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_batching_kwargs</span> <span class="o">=</span> <span class="p">{}</span>
<span class="k">if</span> <span class="n">min_batch_size</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_batching_kwargs</span><span class="p">[</span><span class="s1">&#39;min_batch_size&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">min_batch_size</span>
<span class="k">if</span> <span class="n">max_batch_size</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_batching_kwargs</span><span class="p">[</span><span class="s1">&#39;max_batch_size&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">max_batch_size</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_torch_script_model_path</span> <span class="o">=</span> <span class="n">torch_script_model_path</span>
<span class="n">_validate_constructor_args</span><span class="p">(</span>
<span class="n">state_dict_path</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">_state_dict_path</span><span class="p">,</span>
<span class="n">model_class</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">_model_class</span><span class="p">,</span>
<span class="n">torch_script_model_path</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">_torch_script_model_path</span><span class="p">)</span>
<div class="viewcode-block" id="PytorchModelHandlerTensor.load_model"><a class="viewcode-back" href="../../../../apache_beam.ml.inference.pytorch_inference.html#apache_beam.ml.inference.pytorch_inference.PytorchModelHandlerTensor.load_model">[docs]</a> <span class="k">def</span> <span class="nf">load_model</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">:</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;Loads and initializes a Pytorch model for processing.&quot;&quot;&quot;</span>
<span class="n">model</span><span class="p">,</span> <span class="n">device</span> <span class="o">=</span> <span class="n">_load_model</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_model_class</span><span class="p">,</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_state_dict_path</span><span class="p">,</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_device</span><span class="p">,</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_model_params</span><span class="p">,</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_torch_script_model_path</span>
<span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_device</span> <span class="o">=</span> <span class="n">device</span>
<span class="k">return</span> <span class="n">model</span></div>
<div class="viewcode-block" id="PytorchModelHandlerTensor.update_model_path"><a class="viewcode-back" href="../../../../apache_beam.ml.inference.pytorch_inference.html#apache_beam.ml.inference.pytorch_inference.PytorchModelHandlerTensor.update_model_path">[docs]</a> <span class="k">def</span> <span class="nf">update_model_path</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">model_path</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">):</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">_torch_script_model_path</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_torch_script_model_path</span> <span class="o">=</span> <span class="p">(</span>
<span class="n">model_path</span> <span class="k">if</span> <span class="n">model_path</span> <span class="k">else</span> <span class="bp">self</span><span class="o">.</span><span class="n">_torch_script_model_path</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_state_dict_path</span> <span class="o">=</span> <span class="p">(</span>
<span class="n">model_path</span> <span class="k">if</span> <span class="n">model_path</span> <span class="k">else</span> <span class="bp">self</span><span class="o">.</span><span class="n">_state_dict_path</span><span class="p">)</span></div>
<div class="viewcode-block" id="PytorchModelHandlerTensor.run_inference"><a class="viewcode-back" href="../../../../apache_beam.ml.inference.pytorch_inference.html#apache_beam.ml.inference.pytorch_inference.PytorchModelHandlerTensor.run_inference">[docs]</a> <span class="k">def</span> <span class="nf">run_inference</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span>
<span class="n">batch</span><span class="p">:</span> <span class="n">Sequence</span><span class="p">[</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">],</span>
<span class="n">model</span><span class="p">:</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">,</span>
<span class="n">inference_args</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">Any</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="n">Iterable</span><span class="p">[</span><span class="n">PredictionResult</span><span class="p">]:</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Runs inferences on a batch of Tensors and returns an Iterable of</span>
<span class="sd"> Tensor Predictions.</span>
<span class="sd"> This method stacks the list of Tensors in a vectorized format to optimize</span>
<span class="sd"> the inference call.</span>
<span class="sd"> Args:</span>
<span class="sd"> batch: A sequence of Tensors. These Tensors should be batchable, as this</span>
<span class="sd"> method will call `torch.stack()` and pass in batched Tensors with</span>
<span class="sd"> dimensions (batch_size, n_features, etc.) into the model&#39;s forward()</span>
<span class="sd"> function.</span>
<span class="sd"> model: A PyTorch model.</span>
<span class="sd"> inference_args: Non-batchable arguments required as inputs to the model&#39;s</span>
<span class="sd"> forward() function. Unlike Tensors in `batch`, these parameters will</span>
<span class="sd"> not be dynamically batched</span>
<span class="sd"> Returns:</span>
<span class="sd"> An Iterable of type PredictionResult.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">inference_args</span> <span class="o">=</span> <span class="p">{}</span> <span class="k">if</span> <span class="ow">not</span> <span class="n">inference_args</span> <span class="k">else</span> <span class="n">inference_args</span>
<span class="n">model_id</span> <span class="o">=</span> <span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_state_dict_path</span>
<span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">_torch_script_model_path</span> <span class="k">else</span> <span class="bp">self</span><span class="o">.</span><span class="n">_torch_script_model_path</span><span class="p">)</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_inference_fn</span><span class="p">(</span>
<span class="n">batch</span><span class="p">,</span> <span class="n">model</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">_device</span><span class="p">,</span> <span class="n">inference_args</span><span class="p">,</span> <span class="n">model_id</span><span class="p">)</span></div>
<div class="viewcode-block" id="PytorchModelHandlerTensor.get_num_bytes"><a class="viewcode-back" href="../../../../apache_beam.ml.inference.pytorch_inference.html#apache_beam.ml.inference.pytorch_inference.PytorchModelHandlerTensor.get_num_bytes">[docs]</a> <span class="k">def</span> <span class="nf">get_num_bytes</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">batch</span><span class="p">:</span> <span class="n">Sequence</span><span class="p">[</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">])</span> <span class="o">-&gt;</span> <span class="nb">int</span><span class="p">:</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Returns:</span>
<span class="sd"> The number of bytes of data for a batch of Tensors.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">return</span> <span class="nb">sum</span><span class="p">((</span><span class="n">el</span><span class="o">.</span><span class="n">element_size</span><span class="p">()</span> <span class="k">for</span> <span class="n">tensor</span> <span class="ow">in</span> <span class="n">batch</span> <span class="k">for</span> <span class="n">el</span> <span class="ow">in</span> <span class="n">tensor</span><span class="p">))</span></div>
<div class="viewcode-block" id="PytorchModelHandlerTensor.get_metrics_namespace"><a class="viewcode-back" href="../../../../apache_beam.ml.inference.pytorch_inference.html#apache_beam.ml.inference.pytorch_inference.PytorchModelHandlerTensor.get_metrics_namespace">[docs]</a> <span class="k">def</span> <span class="nf">get_metrics_namespace</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="nb">str</span><span class="p">:</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Returns:</span>
<span class="sd"> A namespace for metrics collected by the RunInference transform.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">return</span> <span class="s1">&#39;BeamML_PyTorch&#39;</span></div>
<div class="viewcode-block" id="PytorchModelHandlerTensor.validate_inference_args"><a class="viewcode-back" href="../../../../apache_beam.ml.inference.pytorch_inference.html#apache_beam.ml.inference.pytorch_inference.PytorchModelHandlerTensor.validate_inference_args">[docs]</a> <span class="k">def</span> <span class="nf">validate_inference_args</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">inference_args</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">Any</span><span class="p">]]):</span>
<span class="k">pass</span></div>
<div class="viewcode-block" id="PytorchModelHandlerTensor.batch_elements_kwargs"><a class="viewcode-back" href="../../../../apache_beam.ml.inference.pytorch_inference.html#apache_beam.ml.inference.pytorch_inference.PytorchModelHandlerTensor.batch_elements_kwargs">[docs]</a> <span class="k">def</span> <span class="nf">batch_elements_kwargs</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_batching_kwargs</span></div></div>
<span class="k">def</span> <span class="nf">default_keyed_tensor_inference_fn</span><span class="p">(</span>
<span class="n">batch</span><span class="p">:</span> <span class="n">Sequence</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">]],</span>
<span class="n">model</span><span class="p">:</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">,</span>
<span class="n">device</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
<span class="n">inference_args</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">Any</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">model_id</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="n">Iterable</span><span class="p">[</span><span class="n">PredictionResult</span><span class="p">]:</span>
<span class="c1"># If elements in `batch` are provided as a dictionaries from key to Tensors,</span>
<span class="c1"># then iterate through the batch list, and group Tensors to the same key</span>
<span class="n">key_to_tensor_list</span> <span class="o">=</span> <span class="n">defaultdict</span><span class="p">(</span><span class="nb">list</span><span class="p">)</span>
<span class="c1"># torch.no_grad() mitigates GPU memory issues</span>
<span class="c1"># https://github.com/apache/beam/issues/22811</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">no_grad</span><span class="p">():</span>
<span class="k">for</span> <span class="n">example</span> <span class="ow">in</span> <span class="n">batch</span><span class="p">:</span>
<span class="k">for</span> <span class="n">key</span><span class="p">,</span> <span class="n">tensor</span> <span class="ow">in</span> <span class="n">example</span><span class="o">.</span><span class="n">items</span><span class="p">():</span>
<span class="n">key_to_tensor_list</span><span class="p">[</span><span class="n">key</span><span class="p">]</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">tensor</span><span class="p">)</span>
<span class="n">key_to_batched_tensors</span> <span class="o">=</span> <span class="p">{}</span>
<span class="k">for</span> <span class="n">key</span> <span class="ow">in</span> <span class="n">key_to_tensor_list</span><span class="p">:</span>
<span class="n">batched_tensors</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">stack</span><span class="p">(</span><span class="n">key_to_tensor_list</span><span class="p">[</span><span class="n">key</span><span class="p">])</span>
<span class="n">batched_tensors</span> <span class="o">=</span> <span class="n">_convert_to_device</span><span class="p">(</span><span class="n">batched_tensors</span><span class="p">,</span> <span class="n">device</span><span class="p">)</span>
<span class="n">key_to_batched_tensors</span><span class="p">[</span><span class="n">key</span><span class="p">]</span> <span class="o">=</span> <span class="n">batched_tensors</span>
<span class="n">predictions</span> <span class="o">=</span> <span class="n">model</span><span class="p">(</span><span class="o">**</span><span class="n">key_to_batched_tensors</span><span class="p">,</span> <span class="o">**</span><span class="n">inference_args</span><span class="p">)</span>
<span class="k">return</span> <span class="n">utils</span><span class="o">.</span><span class="n">_convert_to_result</span><span class="p">(</span><span class="n">batch</span><span class="p">,</span> <span class="n">predictions</span><span class="p">,</span> <span class="n">model_id</span><span class="p">)</span>
<span class="k">def</span> <span class="nf">make_keyed_tensor_model_fn</span><span class="p">(</span><span class="n">model_fn</span><span class="p">:</span> <span class="nb">str</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">KeyedTensorInferenceFn</span><span class="p">:</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Produces a KeyedTensorInferenceFn that uses a method of the model other that</span>
<span class="sd"> the forward() method.</span>
<span class="sd"> Args:</span>
<span class="sd"> model_fn: A string name of the method to be used. This is accessed through</span>
<span class="sd"> getattr(model, model_fn)</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">def</span> <span class="nf">attr_fn</span><span class="p">(</span>
<span class="n">batch</span><span class="p">:</span> <span class="n">Sequence</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">]],</span>
<span class="n">model</span><span class="p">:</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">,</span>
<span class="n">device</span><span class="p">:</span> <span class="nb">str</span><span class="p">,</span>
<span class="n">inference_args</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">Any</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">model_id</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="n">Iterable</span><span class="p">[</span><span class="n">PredictionResult</span><span class="p">]:</span>
<span class="c1"># If elements in `batch` are provided as a dictionaries from key to Tensors,</span>
<span class="c1"># then iterate through the batch list, and group Tensors to the same key</span>
<span class="n">key_to_tensor_list</span> <span class="o">=</span> <span class="n">defaultdict</span><span class="p">(</span><span class="nb">list</span><span class="p">)</span>
<span class="c1"># torch.no_grad() mitigates GPU memory issues</span>
<span class="c1"># https://github.com/apache/beam/issues/22811</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">no_grad</span><span class="p">():</span>
<span class="k">for</span> <span class="n">example</span> <span class="ow">in</span> <span class="n">batch</span><span class="p">:</span>
<span class="k">for</span> <span class="n">key</span><span class="p">,</span> <span class="n">tensor</span> <span class="ow">in</span> <span class="n">example</span><span class="o">.</span><span class="n">items</span><span class="p">():</span>
<span class="n">key_to_tensor_list</span><span class="p">[</span><span class="n">key</span><span class="p">]</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">tensor</span><span class="p">)</span>
<span class="n">key_to_batched_tensors</span> <span class="o">=</span> <span class="p">{}</span>
<span class="k">for</span> <span class="n">key</span> <span class="ow">in</span> <span class="n">key_to_tensor_list</span><span class="p">:</span>
<span class="n">batched_tensors</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">stack</span><span class="p">(</span><span class="n">key_to_tensor_list</span><span class="p">[</span><span class="n">key</span><span class="p">])</span>
<span class="n">batched_tensors</span> <span class="o">=</span> <span class="n">_convert_to_device</span><span class="p">(</span><span class="n">batched_tensors</span><span class="p">,</span> <span class="n">device</span><span class="p">)</span>
<span class="n">key_to_batched_tensors</span><span class="p">[</span><span class="n">key</span><span class="p">]</span> <span class="o">=</span> <span class="n">batched_tensors</span>
<span class="n">pred_fn</span> <span class="o">=</span> <span class="nb">getattr</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">model_fn</span><span class="p">)</span>
<span class="n">predictions</span> <span class="o">=</span> <span class="n">pred_fn</span><span class="p">(</span><span class="o">**</span><span class="n">key_to_batched_tensors</span><span class="p">,</span> <span class="o">**</span><span class="n">inference_args</span><span class="p">)</span>
<span class="k">return</span> <span class="n">utils</span><span class="o">.</span><span class="n">_convert_to_result</span><span class="p">(</span><span class="n">batch</span><span class="p">,</span> <span class="n">predictions</span><span class="p">,</span> <span class="n">model_id</span><span class="p">)</span>
<span class="k">return</span> <span class="n">attr_fn</span>
<div class="viewcode-block" id="PytorchModelHandlerKeyedTensor"><a class="viewcode-back" href="../../../../apache_beam.ml.inference.pytorch_inference.html#apache_beam.ml.inference.pytorch_inference.PytorchModelHandlerKeyedTensor">[docs]</a><span class="nd">@experimental</span><span class="p">(</span><span class="n">extra_message</span><span class="o">=</span><span class="s2">&quot;No backwards-compatibility guarantees.&quot;</span><span class="p">)</span>
<span class="k">class</span> <span class="nc">PytorchModelHandlerKeyedTensor</span><span class="p">(</span><span class="n">ModelHandler</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">],</span>
<span class="n">PredictionResult</span><span class="p">,</span>
<span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">]):</span>
<span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span>
<span class="n">state_dict_path</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">model_class</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Callable</span><span class="p">[</span><span class="o">...</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">model_params</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">Any</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">device</span><span class="p">:</span> <span class="nb">str</span> <span class="o">=</span> <span class="s1">&#39;CPU&#39;</span><span class="p">,</span>
<span class="o">*</span><span class="p">,</span>
<span class="n">inference_fn</span><span class="p">:</span> <span class="n">KeyedTensorInferenceFn</span> <span class="o">=</span> <span class="n">default_keyed_tensor_inference_fn</span><span class="p">,</span>
<span class="n">torch_script_model_path</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">min_batch_size</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">,</span>
<span class="n">max_batch_size</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">int</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">):</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;Implementation of the ModelHandler interface for PyTorch.</span>
<span class="sd"> Example Usage for torch model::</span>
<span class="sd"> pcoll | RunInference(PytorchModelHandlerKeyedTensor(</span>
<span class="sd"> state_dict_path=&quot;my_uri&quot;,</span>
<span class="sd"> model_class=&quot;my_class&quot;))</span>
<span class="sd"> Example Usage for torchscript model::</span>
<span class="sd"> pcoll | RunInference(PytorchModelHandlerKeyedTensor(</span>
<span class="sd"> torch_script_model_path=&quot;my_uri&quot;))</span>
<span class="sd"> **NOTE:** This API and its implementation are under development and</span>
<span class="sd"> do not provide backward compatibility guarantees.</span>
<span class="sd"> See https://pytorch.org/tutorials/beginner/saving_loading_models.html</span>
<span class="sd"> for details</span>
<span class="sd"> Args:</span>
<span class="sd"> state_dict_path: path to the saved dictionary of the model state.</span>
<span class="sd"> model_class: class of the Pytorch model that defines the model</span>
<span class="sd"> structure.</span>
<span class="sd"> model_params: A dictionary of arguments required to instantiate the model</span>
<span class="sd"> class.</span>
<span class="sd"> device: the device on which you wish to run the model. If</span>
<span class="sd"> ``device = GPU`` then a GPU device will be used if it is available.</span>
<span class="sd"> Otherwise, it will be CPU.</span>
<span class="sd"> inference_fn: the function to invoke on run_inference.</span>
<span class="sd"> default = default_keyed_tensor_inference_fn</span>
<span class="sd"> torch_script_model_path: Path to the torch script model.</span>
<span class="sd"> the model will be loaded using `torch.jit.load()`.</span>
<span class="sd"> `state_dict_path`, `model_class` and `model_params`</span>
<span class="sd"> arguments will be disregarded..</span>
<span class="sd"> min_batch_size: the minimum batch size to use when batching inputs. This</span>
<span class="sd"> batch will be fed into the inference_fn as a Sequence of Keyed Tensors.</span>
<span class="sd"> max_batch_size: the maximum batch size to use when batching inputs. This</span>
<span class="sd"> batch will be fed into the inference_fn as a Sequence of Keyed Tensors.</span>
<span class="sd"> **Supported Versions:** RunInference APIs in Apache Beam have been tested</span>
<span class="sd"> on torch&gt;=1.9.0,&lt;1.14.0.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_state_dict_path</span> <span class="o">=</span> <span class="n">state_dict_path</span>
<span class="k">if</span> <span class="n">device</span> <span class="o">==</span> <span class="s1">&#39;GPU&#39;</span><span class="p">:</span>
<span class="n">logging</span><span class="o">.</span><span class="n">info</span><span class="p">(</span><span class="s2">&quot;Device is set to CUDA&quot;</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s1">&#39;cuda&#39;</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">logging</span><span class="o">.</span><span class="n">info</span><span class="p">(</span><span class="s2">&quot;Device is set to CPU&quot;</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_device</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s1">&#39;cpu&#39;</span><span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_model_class</span> <span class="o">=</span> <span class="n">model_class</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_model_params</span> <span class="o">=</span> <span class="n">model_params</span> <span class="k">if</span> <span class="n">model_params</span> <span class="k">else</span> <span class="p">{}</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_inference_fn</span> <span class="o">=</span> <span class="n">inference_fn</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_batching_kwargs</span> <span class="o">=</span> <span class="p">{}</span>
<span class="k">if</span> <span class="n">min_batch_size</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_batching_kwargs</span><span class="p">[</span><span class="s1">&#39;min_batch_size&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">min_batch_size</span>
<span class="k">if</span> <span class="n">max_batch_size</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_batching_kwargs</span><span class="p">[</span><span class="s1">&#39;max_batch_size&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">max_batch_size</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_torch_script_model_path</span> <span class="o">=</span> <span class="n">torch_script_model_path</span>
<span class="n">_validate_constructor_args</span><span class="p">(</span>
<span class="n">state_dict_path</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">_state_dict_path</span><span class="p">,</span>
<span class="n">model_class</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">_model_class</span><span class="p">,</span>
<span class="n">torch_script_model_path</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">_torch_script_model_path</span><span class="p">)</span>
<div class="viewcode-block" id="PytorchModelHandlerKeyedTensor.load_model"><a class="viewcode-back" href="../../../../apache_beam.ml.inference.pytorch_inference.html#apache_beam.ml.inference.pytorch_inference.PytorchModelHandlerKeyedTensor.load_model">[docs]</a> <span class="k">def</span> <span class="nf">load_model</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">:</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;Loads and initializes a Pytorch model for processing.&quot;&quot;&quot;</span>
<span class="n">model</span><span class="p">,</span> <span class="n">device</span> <span class="o">=</span> <span class="n">_load_model</span><span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_model_class</span><span class="p">,</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_state_dict_path</span><span class="p">,</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_device</span><span class="p">,</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_model_params</span><span class="p">,</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_torch_script_model_path</span>
<span class="p">)</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_device</span> <span class="o">=</span> <span class="n">device</span>
<span class="k">return</span> <span class="n">model</span></div>
<div class="viewcode-block" id="PytorchModelHandlerKeyedTensor.update_model_path"><a class="viewcode-back" href="../../../../apache_beam.ml.inference.pytorch_inference.html#apache_beam.ml.inference.pytorch_inference.PytorchModelHandlerKeyedTensor.update_model_path">[docs]</a> <span class="k">def</span> <span class="nf">update_model_path</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">model_path</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="nb">str</span><span class="p">]</span> <span class="o">=</span> <span class="kc">None</span><span class="p">):</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">_torch_script_model_path</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_torch_script_model_path</span> <span class="o">=</span> <span class="p">(</span>
<span class="n">model_path</span> <span class="k">if</span> <span class="n">model_path</span> <span class="k">else</span> <span class="bp">self</span><span class="o">.</span><span class="n">_torch_script_model_path</span><span class="p">)</span>
<span class="k">else</span><span class="p">:</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_state_dict_path</span> <span class="o">=</span> <span class="p">(</span>
<span class="n">model_path</span> <span class="k">if</span> <span class="n">model_path</span> <span class="k">else</span> <span class="bp">self</span><span class="o">.</span><span class="n">_state_dict_path</span><span class="p">)</span></div>
<div class="viewcode-block" id="PytorchModelHandlerKeyedTensor.run_inference"><a class="viewcode-back" href="../../../../apache_beam.ml.inference.pytorch_inference.html#apache_beam.ml.inference.pytorch_inference.PytorchModelHandlerKeyedTensor.run_inference">[docs]</a> <span class="k">def</span> <span class="nf">run_inference</span><span class="p">(</span>
<span class="bp">self</span><span class="p">,</span>
<span class="n">batch</span><span class="p">:</span> <span class="n">Sequence</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">]],</span>
<span class="n">model</span><span class="p">:</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Module</span><span class="p">,</span>
<span class="n">inference_args</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">Any</span><span class="p">]]</span> <span class="o">=</span> <span class="kc">None</span>
<span class="p">)</span> <span class="o">-&gt;</span> <span class="n">Iterable</span><span class="p">[</span><span class="n">PredictionResult</span><span class="p">]:</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Runs inferences on a batch of Keyed Tensors and returns an Iterable of</span>
<span class="sd"> Tensor Predictions.</span>
<span class="sd"> For the same key across all examples, this will stack all Tensors values</span>
<span class="sd"> in a vectorized format to optimize the inference call.</span>
<span class="sd"> Args:</span>
<span class="sd"> batch: A sequence of keyed Tensors. These Tensors should be batchable,</span>
<span class="sd"> as this method will call `torch.stack()` and pass in batched Tensors</span>
<span class="sd"> with dimensions (batch_size, n_features, etc.) into the model&#39;s</span>
<span class="sd"> forward() function.</span>
<span class="sd"> model: A PyTorch model.</span>
<span class="sd"> inference_args: Non-batchable arguments required as inputs to the model&#39;s</span>
<span class="sd"> forward() function. Unlike Tensors in `batch`, these parameters will</span>
<span class="sd"> not be dynamically batched</span>
<span class="sd"> Returns:</span>
<span class="sd"> An Iterable of type PredictionResult.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="n">inference_args</span> <span class="o">=</span> <span class="p">{}</span> <span class="k">if</span> <span class="ow">not</span> <span class="n">inference_args</span> <span class="k">else</span> <span class="n">inference_args</span>
<span class="n">model_id</span> <span class="o">=</span> <span class="p">(</span>
<span class="bp">self</span><span class="o">.</span><span class="n">_state_dict_path</span>
<span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">_torch_script_model_path</span> <span class="k">else</span> <span class="bp">self</span><span class="o">.</span><span class="n">_torch_script_model_path</span><span class="p">)</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_inference_fn</span><span class="p">(</span>
<span class="n">batch</span><span class="p">,</span> <span class="n">model</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">_device</span><span class="p">,</span> <span class="n">inference_args</span><span class="p">,</span> <span class="n">model_id</span><span class="p">)</span></div>
<div class="viewcode-block" id="PytorchModelHandlerKeyedTensor.get_num_bytes"><a class="viewcode-back" href="../../../../apache_beam.ml.inference.pytorch_inference.html#apache_beam.ml.inference.pytorch_inference.PytorchModelHandlerKeyedTensor.get_num_bytes">[docs]</a> <span class="k">def</span> <span class="nf">get_num_bytes</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">batch</span><span class="p">:</span> <span class="n">Sequence</span><span class="p">[</span><span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span><span class="p">])</span> <span class="o">-&gt;</span> <span class="nb">int</span><span class="p">:</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Returns:</span>
<span class="sd"> The number of bytes of data for a batch of Dict of Tensors.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="c1"># If elements in `batch` are provided as a dictionaries from key to Tensors</span>
<span class="k">return</span> <span class="nb">sum</span><span class="p">(</span>
<span class="p">(</span><span class="n">el</span><span class="o">.</span><span class="n">element_size</span><span class="p">()</span> <span class="k">for</span> <span class="n">tensor</span> <span class="ow">in</span> <span class="n">batch</span> <span class="k">for</span> <span class="n">el</span> <span class="ow">in</span> <span class="n">tensor</span><span class="o">.</span><span class="n">values</span><span class="p">()))</span></div>
<div class="viewcode-block" id="PytorchModelHandlerKeyedTensor.get_metrics_namespace"><a class="viewcode-back" href="../../../../apache_beam.ml.inference.pytorch_inference.html#apache_beam.ml.inference.pytorch_inference.PytorchModelHandlerKeyedTensor.get_metrics_namespace">[docs]</a> <span class="k">def</span> <span class="nf">get_metrics_namespace</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="nb">str</span><span class="p">:</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> Returns:</span>
<span class="sd"> A namespace for metrics collected by the RunInference transform.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">return</span> <span class="s1">&#39;BeamML_PyTorch&#39;</span></div>
<div class="viewcode-block" id="PytorchModelHandlerKeyedTensor.validate_inference_args"><a class="viewcode-back" href="../../../../apache_beam.ml.inference.pytorch_inference.html#apache_beam.ml.inference.pytorch_inference.PytorchModelHandlerKeyedTensor.validate_inference_args">[docs]</a> <span class="k">def</span> <span class="nf">validate_inference_args</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">inference_args</span><span class="p">:</span> <span class="n">Optional</span><span class="p">[</span><span class="n">Dict</span><span class="p">[</span><span class="nb">str</span><span class="p">,</span> <span class="n">Any</span><span class="p">]]):</span>
<span class="k">pass</span></div>
<div class="viewcode-block" id="PytorchModelHandlerKeyedTensor.batch_elements_kwargs"><a class="viewcode-back" href="../../../../apache_beam.ml.inference.pytorch_inference.html#apache_beam.ml.inference.pytorch_inference.PytorchModelHandlerKeyedTensor.batch_elements_kwargs">[docs]</a> <span class="k">def</span> <span class="nf">batch_elements_kwargs</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_batching_kwargs</span></div></div>
</pre></div>
</div>
</div>