<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>apache_beam.ml.inference.tensorrt_inference module &mdash; Apache Beam 2.47.0 documentation</title>
<script type="text/javascript" src="_static/js/modernizr.min.js"></script>
<script type="text/javascript" id="documentation_options" data-url_root="./" src="_static/documentation_options.js"></script>
<script type="text/javascript" src="_static/jquery.js"></script>
<script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script>
<script type="text/javascript" src="_static/language_data.js"></script>
<script async="async" type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
<script type="text/javascript" src="_static/js/theme.js"></script>
<link rel="stylesheet" href="_static/css/theme.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<link rel="index" title="Index" href="genindex.html" />
<link rel="search" title="Search" href="search.html" />
<link rel="next" title="apache_beam.ml.inference.utils module" href="apache_beam.ml.inference.utils.html" />
<link rel="prev" title="apache_beam.ml.inference.tensorflow_inference module" href="apache_beam.ml.inference.tensorflow_inference.html" />
</head>
<body class="wy-body-for-nav">
<div class="wy-grid-for-nav">
<nav data-toggle="wy-nav-shift" class="wy-nav-side">
<div class="wy-side-scroll">
<div class="wy-side-nav-search" >
<a href="index.html" class="icon icon-home"> Apache Beam
</a>
<div class="version">
2.47.0
</div>
<div role="search">
<form id="rtd-search-form" class="wy-form" action="search.html" method="get">
<input type="text" name="q" placeholder="Search docs" />
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form>
</div>
</div>
<div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="apache_beam.coders.html">apache_beam.coders package</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.dataframe.html">apache_beam.dataframe package</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.io.html">apache_beam.io package</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.metrics.html">apache_beam.metrics package</a></li>
<li class="toctree-l1 current"><a class="reference internal" href="apache_beam.ml.html">apache_beam.ml package</a><ul class="current">
<li class="toctree-l2 current"><a class="reference internal" href="apache_beam.ml.html#subpackages">Subpackages</a><ul class="current">
<li class="toctree-l3"><a class="reference internal" href="apache_beam.ml.gcp.html">apache_beam.ml.gcp package</a></li>
<li class="toctree-l3 current"><a class="reference internal" href="apache_beam.ml.inference.html">apache_beam.ml.inference package</a><ul class="current">
<li class="toctree-l4 current"><a class="reference internal" href="apache_beam.ml.inference.html#submodules">Submodules</a></li>
</ul>
</li>
</ul>
</li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.options.html">apache_beam.options package</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.portability.html">apache_beam.portability package</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.runners.html">apache_beam.runners package</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.testing.html">apache_beam.testing package</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.transforms.html">apache_beam.transforms package</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.typehints.html">apache_beam.typehints package</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.utils.html">apache_beam.utils package</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.yaml.html">apache_beam.yaml package</a></li>
</ul>
<ul>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.error.html">apache_beam.error module</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.pipeline.html">apache_beam.pipeline module</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.pvalue.html">apache_beam.pvalue module</a></li>
</ul>
</div>
</div>
</nav>
<section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
<nav class="wy-nav-top" aria-label="top navigation">
<i data-toggle="wy-nav-top" class="fa fa-bars"></i>
<a href="index.html">Apache Beam</a>
</nav>
<div class="wy-nav-content">
<div class="rst-content">
<div role="navigation" aria-label="breadcrumbs navigation">
<ul class="wy-breadcrumbs">
<li><a href="index.html">Docs</a> &raquo;</li>
<li><a href="apache_beam.ml.html">apache_beam.ml package</a> &raquo;</li>
<li><a href="apache_beam.ml.inference.html">apache_beam.ml.inference package</a> &raquo;</li>
<li>apache_beam.ml.inference.tensorrt_inference module</li>
<li class="wy-breadcrumbs-aside">
<a href="_sources/apache_beam.ml.inference.tensorrt_inference.rst.txt" rel="nofollow"> View page source</a>
</li>
</ul>
<hr/>
</div>
<div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
<div itemprop="articleBody">
<div class="section" id="module-apache_beam.ml.inference.tensorrt_inference">
<span id="apache-beam-ml-inference-tensorrt-inference-module"></span><h1>apache_beam.ml.inference.tensorrt_inference module<a class="headerlink" href="#module-apache_beam.ml.inference.tensorrt_inference" title="Permalink to this headline"></a></h1>
<dl class="class">
<dt id="apache_beam.ml.inference.tensorrt_inference.TensorRTEngine">
<em class="property">class </em><code class="descclassname">apache_beam.ml.inference.tensorrt_inference.</code><code class="descname">TensorRTEngine</code><span class="sig-paren">(</span><em>engine: &lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f2e2ddb4d60&gt;</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/apache_beam/ml/inference/tensorrt_inference.html#TensorRTEngine"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.tensorrt_inference.TensorRTEngine" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <a class="reference external" href="https://docs.python.org/3/library/functions.html#object" title="(in Python v3.11)"><code class="xref py py-class docutils literal notranslate"><span class="pre">object</span></code></a></p>
<p>Implementation of the TensorRTEngine class, which handles the
allocations associated with a TensorRT engine.</p>
<p>Example Usage:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">TensorRTEngine</span><span class="p">(</span><span class="n">engine</span><span class="p">)</span>
</pre></div>
</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><strong>engine</strong> – trt.ICudaEngine object that contains TensorRT engine</td>
</tr>
</tbody>
</table>
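<p>For example, a minimal sketch of wrapping a deserialized engine, assuming a local
TensorRT installation and a serialized engine file at a hypothetical path
<code class="docutils literal notranslate"><span class="pre">model.trt</span></code>:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>import tensorrt as trt

from apache_beam.ml.inference.tensorrt_inference import TensorRTEngine

# Deserialize a previously built engine from disk ("model.trt" is hypothetical).
logger = trt.Logger(trt.Logger.INFO)
with open('model.trt', 'rb') as f, trt.Runtime(logger) as runtime:
  cuda_engine = runtime.deserialize_cuda_engine(f.read())  # trt.ICudaEngine

# TensorRTEngine handles the allocations associated with the engine.
trt_engine = TensorRTEngine(cuda_engine)
engine_attrs = trt_engine.get_engine_attrs()
</pre></div>
</div>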
<dl class="method">
<dt id="apache_beam.ml.inference.tensorrt_inference.TensorRTEngine.get_engine_attrs">
<code class="descname">get_engine_attrs</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="reference internal" href="_modules/apache_beam/ml/inference/tensorrt_inference.html#TensorRTEngine.get_engine_attrs"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.tensorrt_inference.TensorRTEngine.get_engine_attrs" title="Permalink to this definition"></a></dt>
<dd><p>Returns TensorRT engine attributes.</p>
</dd></dl>
</dd></dl>
<dl class="class">
<dt id="apache_beam.ml.inference.tensorrt_inference.TensorRTEngineHandlerNumPy">
<em class="property">class </em><code class="descclassname">apache_beam.ml.inference.tensorrt_inference.</code><code class="descname">TensorRTEngineHandlerNumPy</code><span class="sig-paren">(</span><em>min_batch_size: int, max_batch_size: int, *, inference_fn: Callable[[Sequence[numpy.ndarray], apache_beam.ml.inference.tensorrt_inference.TensorRTEngine, Optional[Dict[str, Any]]], Iterable[apache_beam.ml.inference.base.PredictionResult]] = &lt;function _default_tensorRT_inference_fn&gt;, **kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/apache_beam/ml/inference/tensorrt_inference.html#TensorRTEngineHandlerNumPy"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.tensorrt_inference.TensorRTEngineHandlerNumPy" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="apache_beam.ml.inference.base.html#apache_beam.ml.inference.base.ModelHandler" title="apache_beam.ml.inference.base.ModelHandler"><code class="xref py py-class docutils literal notranslate"><span class="pre">apache_beam.ml.inference.base.ModelHandler</span></code></a></p>
<p>Implementation of the ModelHandler interface for TensorRT.</p>
<p>Example Usage:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">pcoll</span> <span class="o">|</span> <span class="n">RunInference</span><span class="p">(</span>
<span class="n">TensorRTEngineHandlerNumPy</span><span class="p">(</span>
<span class="n">min_batch_size</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
<span class="n">max_batch_size</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
<span class="n">engine_path</span><span class="o">=</span><span class="s2">&quot;my_uri&quot;</span><span class="p">))</span>
</pre></div>
</div>
<p><strong>NOTE:</strong> This API and its implementation are under development and
do not provide backward compatibility guarantees.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>min_batch_size</strong> – minimum accepted batch size.</li>
<li><strong>max_batch_size</strong> – maximum accepted batch size.</li>
<li><strong>inference_fn</strong> – the inference function to use during RunInference calls.
Default: _default_tensorRT_inference_fn.</li>
<li><strong>kwargs</strong> – Additional arguments like ‘engine_path’ and ‘onnx_path’ are
currently supported.</li>
</ul>
</td>
</tr>
</tbody>
</table>
<p>See <a class="reference external" href="https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/">https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/</a>
for details</p>
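<p>A fuller pipeline sketch, where the engine URI and input shape below are hypothetical
placeholders:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>import numpy as np

import apache_beam as beam
from apache_beam.ml.inference.base import RunInference
from apache_beam.ml.inference.tensorrt_inference import TensorRTEngineHandlerNumPy

engine_handler = TensorRTEngineHandlerNumPy(
    min_batch_size=1,
    max_batch_size=4,
    engine_path='gs://my-bucket/model.trt')  # hypothetical URI

with beam.Pipeline() as pipeline:
  _ = (
      pipeline
      | 'CreateExamples' >> beam.Create(
          [np.zeros((1, 3, 224, 224), dtype=np.float32)])  # hypothetical shape
      | 'RunInference' >> RunInference(engine_handler)
      | 'PrintResults' >> beam.Map(print))
</pre></div>
</div>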
<dl class="method">
<dt id="apache_beam.ml.inference.tensorrt_inference.TensorRTEngineHandlerNumPy.batch_elements_kwargs">
<code class="descname">batch_elements_kwargs</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="reference internal" href="_modules/apache_beam/ml/inference/tensorrt_inference.html#TensorRTEngineHandlerNumPy.batch_elements_kwargs"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.tensorrt_inference.TensorRTEngineHandlerNumPy.batch_elements_kwargs" title="Permalink to this definition"></a></dt>
<dd><p>Sets min_batch_size and max_batch_size of a TensorRT engine.</p>
</dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.tensorrt_inference.TensorRTEngineHandlerNumPy.load_model">
<code class="descname">load_model</code><span class="sig-paren">(</span><span class="sig-paren">)</span> &#x2192; apache_beam.ml.inference.tensorrt_inference.TensorRTEngine<a class="reference internal" href="_modules/apache_beam/ml/inference/tensorrt_inference.html#TensorRTEngineHandlerNumPy.load_model"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.tensorrt_inference.TensorRTEngineHandlerNumPy.load_model" title="Permalink to this definition"></a></dt>
<dd><p>Loads and initializes a TensorRT engine for processing.</p>
</dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.tensorrt_inference.TensorRTEngineHandlerNumPy.load_onnx">
<code class="descname">load_onnx</code><span class="sig-paren">(</span><span class="sig-paren">)</span> &#x2192; Tuple[&lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f2e2ddb4f70&gt;, &lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f2e2ddb4c10&gt;]<a class="reference internal" href="_modules/apache_beam/ml/inference/tensorrt_inference.html#TensorRTEngineHandlerNumPy.load_onnx"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.tensorrt_inference.TensorRTEngineHandlerNumPy.load_onnx" title="Permalink to this definition"></a></dt>
<dd><p>Loads and parses an ONNX model for processing.</p>
</dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.tensorrt_inference.TensorRTEngineHandlerNumPy.build_engine">
<code class="descname">build_engine</code><span class="sig-paren">(</span><em>network: &lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f2e2e4684f0&gt;</em>, <em>builder: &lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f2e2e468f70&gt;</em><span class="sig-paren">)</span> &#x2192; apache_beam.ml.inference.tensorrt_inference.TensorRTEngine<a class="reference internal" href="_modules/apache_beam/ml/inference/tensorrt_inference.html#TensorRTEngineHandlerNumPy.build_engine"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.tensorrt_inference.TensorRTEngineHandlerNumPy.build_engine" title="Permalink to this definition"></a></dt>
<dd><p>Build an engine according to parsed/created network.</p>
</dd></dl>
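<p>Together, <code class="docutils literal notranslate"><span class="pre">load_onnx</span></code> and
<code class="docutils literal notranslate"><span class="pre">build_engine</span></code> allow an engine to be built from an
ONNX model instead of a prebuilt engine file. A minimal sketch, with a hypothetical model URI:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span># Construct the handler with onnx_path instead of engine_path.
onnx_handler = TensorRTEngineHandlerNumPy(
    min_batch_size=1,
    max_batch_size=4,
    onnx_path='gs://my-bucket/model.onnx')  # hypothetical URI

# Parse the ONNX model, then build a TensorRT engine from the parsed network.
network, builder = onnx_handler.load_onnx()
trt_engine = onnx_handler.build_engine(network, builder)  # TensorRTEngine
</pre></div>
</div>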
<dl class="method">
<dt id="apache_beam.ml.inference.tensorrt_inference.TensorRTEngineHandlerNumPy.run_inference">
<code class="descname">run_inference</code><span class="sig-paren">(</span><em>batch: Sequence[numpy.ndarray], engine: apache_beam.ml.inference.tensorrt_inference.TensorRTEngine, inference_args: Optional[Dict[str, Any]] = None</em><span class="sig-paren">)</span> &#x2192; Iterable[apache_beam.ml.inference.base.PredictionResult]<a class="reference internal" href="_modules/apache_beam/ml/inference/tensorrt_inference.html#TensorRTEngineHandlerNumPy.run_inference"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.tensorrt_inference.TensorRTEngineHandlerNumPy.run_inference" title="Permalink to this definition"></a></dt>
<dd><p>Runs inferences on a batch of Tensors and returns an Iterable of
TensorRT Predictions.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>batch</strong> – A single np.ndarray, or an np.ndarray that represents a concatenation
of multiple arrays as a batch.</li>
<li><strong>engine</strong> – A TensorRT engine.</li>
<li><strong>inference_args</strong> – Any additional arguments for an inference
that are not applicable to TensorRT.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first last">An Iterable of type PredictionResult.</p>
</td>
</tr>
</tbody>
</table>
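<p>Normally <code class="docutils literal notranslate"><span class="pre">run_inference</span></code> is invoked by the
RunInference transform; a minimal direct-call sketch, assuming a handler constructed with
<code class="docutils literal notranslate"><span class="pre">engine_path</span></code> as in the class-level example and a
hypothetical input shape:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>import numpy as np

# engine_handler is a TensorRTEngineHandlerNumPy constructed with engine_path.
engine = engine_handler.load_model()                    # TensorRTEngine
batch = [np.zeros((1, 3, 224, 224), dtype=np.float32)]  # hypothetical shape

for prediction in engine_handler.run_inference(batch, engine):
  # Each item is a PredictionResult pairing an input example with its inference.
  print(prediction.example.shape, prediction.inference)
</pre></div>
</div>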
</dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.tensorrt_inference.TensorRTEngineHandlerNumPy.get_num_bytes">
<code class="descname">get_num_bytes</code><span class="sig-paren">(</span><em>batch: Sequence[numpy.ndarray]</em><span class="sig-paren">)</span> &#x2192; int<a class="reference internal" href="_modules/apache_beam/ml/inference/tensorrt_inference.html#TensorRTEngineHandlerNumPy.get_num_bytes"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.tensorrt_inference.TensorRTEngineHandlerNumPy.get_num_bytes" title="Permalink to this definition"></a></dt>
<dd><table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Returns:</th><td class="field-body">The number of bytes of data for a batch of Tensors.</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.tensorrt_inference.TensorRTEngineHandlerNumPy.get_metrics_namespace">
<code class="descname">get_metrics_namespace</code><span class="sig-paren">(</span><span class="sig-paren">)</span> &#x2192; str<a class="reference internal" href="_modules/apache_beam/ml/inference/tensorrt_inference.html#TensorRTEngineHandlerNumPy.get_metrics_namespace"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.tensorrt_inference.TensorRTEngineHandlerNumPy.get_metrics_namespace" title="Permalink to this definition"></a></dt>
<dd><p>Returns a namespace for metrics collected by the RunInference transform.</p>
</dd></dl>
</dd></dl>
<dl class="function">
<dt id="apache_beam.ml.inference.tensorrt_inference.experimental">
<code class="descclassname">apache_beam.ml.inference.tensorrt_inference.</code><code class="descname">experimental</code><span class="sig-paren">(</span><em>*</em>, <em>label='experimental'</em>, <em>since=None</em>, <em>current=None</em>, <em>extra_message=None</em>, <em>custom_message=None</em><span class="sig-paren">)</span><a class="headerlink" href="#apache_beam.ml.inference.tensorrt_inference.experimental" title="Permalink to this definition"></a></dt>
<dd><p>Decorates an API with a deprecated or experimental annotation.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>label</strong> – the kind of annotation (‘deprecated’ or ‘experimental’).</li>
<li><strong>since</strong> – the version that causes the annotation.</li>
<li><strong>current</strong> – the suggested replacement function.</li>
<li><strong>extra_message</strong> – an optional additional message.</li>
<li><strong>custom_message</strong> – if the default message does not suffice, the message
can be changed using this argument. A string
with replacement tokens.
A replacement token marks where the corresponding argument
will be placed in the custom message.
The following replacement tokens can be used:
%name% -&gt; API.__name__
%since% -&gt; since (mandatory for the deprecated annotation)
%current% -&gt; current
%extra% -&gt; extra_message</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first last">The decorator for the API.</p>
</td>
</tr>
</tbody>
</table>
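<p>A minimal usage sketch of the decorator; it is defined in
<code class="docutils literal notranslate"><span class="pre">apache_beam.utils.annotations</span></code> and only
re-exported by this module, and the version string below is hypothetical:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>from apache_beam.utils.annotations import experimental

@experimental(
    since='2.43.0',  # hypothetical version
    custom_message='%name% is experimental as of %since% and may change.')
def my_new_api():
  ...

# Calling my_new_api() emits a warning built from the message, with %name% and
# %since% replaced by the function name and the given version.
my_new_api()
</pre></div>
</div>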
</dd></dl>
</div>
</div>
</div>
<footer>
<div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
<a href="apache_beam.ml.inference.utils.html" class="btn btn-neutral float-right" title="apache_beam.ml.inference.utils module" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right"></span></a>
<a href="apache_beam.ml.inference.tensorflow_inference.html" class="btn btn-neutral float-left" title="apache_beam.ml.inference.tensorflow_inference module" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left"></span> Previous</a>
</div>
<hr/>
<div role="contentinfo">
<p>
&copy; Copyright
</p>
</div>
Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/rtfd/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
</footer>
</div>
</div>
</section>
</div>
<script type="text/javascript">
jQuery(function () {
SphinxRtdTheme.Navigation.enable(true);
});
</script>
</body>
</html>