<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>apache_beam.ml.inference.huggingface_inference module &mdash; Apache Beam 2.52.0 documentation</title>
<script type="text/javascript" src="_static/js/modernizr.min.js"></script>
<script type="text/javascript" id="documentation_options" data-url_root="./" src="_static/documentation_options.js"></script>
<script type="text/javascript" src="_static/jquery.js"></script>
<script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script>
<script type="text/javascript" src="_static/language_data.js"></script>
<script async="async" type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
<script type="text/javascript" src="_static/js/theme.js"></script>
<link rel="stylesheet" href="_static/css/theme.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<link rel="index" title="Index" href="genindex.html" />
<link rel="search" title="Search" href="search.html" />
<link rel="next" title="apache_beam.ml.inference.onnx_inference module" href="apache_beam.ml.inference.onnx_inference.html" />
<link rel="prev" title="apache_beam.ml.inference.base module" href="apache_beam.ml.inference.base.html" />
</head>
<body class="wy-body-for-nav">
<div class="wy-grid-for-nav">
<nav data-toggle="wy-nav-shift" class="wy-nav-side">
<div class="wy-side-scroll">
<div class="wy-side-nav-search" >
<a href="index.html" class="icon icon-home"> Apache Beam
</a>
<div class="version">
2.52.0
</div>
<div role="search">
<form id="rtd-search-form" class="wy-form" action="search.html" method="get">
<input type="text" name="q" placeholder="Search docs" />
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form>
</div>
</div>
<div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="apache_beam.coders.html">apache_beam.coders package</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.dataframe.html">apache_beam.dataframe package</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.io.html">apache_beam.io package</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.metrics.html">apache_beam.metrics package</a></li>
<li class="toctree-l1 current"><a class="reference internal" href="apache_beam.ml.html">apache_beam.ml package</a><ul class="current">
<li class="toctree-l2 current"><a class="reference internal" href="apache_beam.ml.html#subpackages">Subpackages</a><ul class="current">
<li class="toctree-l3"><a class="reference internal" href="apache_beam.ml.gcp.html">apache_beam.ml.gcp package</a></li>
<li class="toctree-l3 current"><a class="reference internal" href="apache_beam.ml.inference.html">apache_beam.ml.inference package</a><ul class="current">
<li class="toctree-l4 current"><a class="reference internal" href="apache_beam.ml.inference.html#submodules">Submodules</a></li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="apache_beam.ml.transforms.html">apache_beam.ml.transforms package</a></li>
</ul>
</li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.options.html">apache_beam.options package</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.portability.html">apache_beam.portability package</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.runners.html">apache_beam.runners package</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.testing.html">apache_beam.testing package</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.transforms.html">apache_beam.transforms package</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.typehints.html">apache_beam.typehints package</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.utils.html">apache_beam.utils package</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.yaml.html">apache_beam.yaml package</a></li>
</ul>
<ul>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.error.html">apache_beam.error module</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.pipeline.html">apache_beam.pipeline module</a></li>
<li class="toctree-l1"><a class="reference internal" href="apache_beam.pvalue.html">apache_beam.pvalue module</a></li>
</ul>
</div>
</div>
</nav>
<section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
<nav class="wy-nav-top" aria-label="top navigation">
<i data-toggle="wy-nav-top" class="fa fa-bars"></i>
<a href="index.html">Apache Beam</a>
</nav>
<div class="wy-nav-content">
<div class="rst-content">
<div role="navigation" aria-label="breadcrumbs navigation">
<ul class="wy-breadcrumbs">
<li><a href="index.html">Docs</a> &raquo;</li>
<li><a href="apache_beam.ml.html">apache_beam.ml package</a> &raquo;</li>
<li><a href="apache_beam.ml.inference.html">apache_beam.ml.inference package</a> &raquo;</li>
<li>apache_beam.ml.inference.huggingface_inference module</li>
<li class="wy-breadcrumbs-aside">
<a href="_sources/apache_beam.ml.inference.huggingface_inference.rst.txt" rel="nofollow"> View page source</a>
</li>
</ul>
<hr/>
</div>
<div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
<div itemprop="articleBody">
<div class="section" id="module-apache_beam.ml.inference.huggingface_inference">
<span id="apache-beam-ml-inference-huggingface-inference-module"></span><h1>apache_beam.ml.inference.huggingface_inference module<a class="headerlink" href="#module-apache_beam.ml.inference.huggingface_inference" title="Permalink to this headline"></a></h1>
<dl class="class">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerKeyedTensor">
<em class="property">class </em><code class="descclassname">apache_beam.ml.inference.huggingface_inference.</code><code class="descname">HuggingFaceModelHandlerKeyedTensor</code><span class="sig-paren">(</span><em>model_uri: str, model_class: Union[&lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f50f10faa60&gt;, &lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f50f10fa190&gt;], framework: str, device: str = 'CPU', *, inference_fn: Optional[Callable[[...], Iterable[apache_beam.ml.inference.base.PredictionResult]]] = None, load_model_args: Optional[Dict[str, Any]] = None, inference_args: Optional[Dict[str, Any]] = None, min_batch_size: Optional[int] = None, max_batch_size: Optional[int] = None, large_model: bool = False, **kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFaceModelHandlerKeyedTensor"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerKeyedTensor" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="apache_beam.ml.inference.base.html#apache_beam.ml.inference.base.ModelHandler" title="apache_beam.ml.inference.base.ModelHandler"><code class="xref py py-class docutils literal notranslate"><span class="pre">apache_beam.ml.inference.base.ModelHandler</span></code></a></p>
<p>Implementation of the ModelHandler interface for HuggingFace with
Keyed Tensors for PyTorch/Tensorflow backend.</p>
<dl class="docutils">
<dt>Example Usage:</dt>
<dd><pre class="first last literal-block">pcoll | RunInference(HuggingFaceModelHandlerKeyedTensor(
    model_uri="bert-base-uncased", model_class=AutoModelForMaskedLM,
    framework='pt'))</pre>
</dd>
</dl>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>model_uri</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.12)"><em>str</em></a>) – path to the pretrained model on the Hugging Face
Models Hub.</li>
<li><strong>model_class</strong> – model class with which to load the model from model_uri.</li>
<li><strong>framework</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.12)"><em>str</em></a>) – Framework to use for the model. ‘tf’ for TensorFlow and
‘pt’ for PyTorch.</li>
<li><strong>device</strong> – For torch tensors, the device on which to
run the model. Defaults to CPU.</li>
<li><strong>inference_fn</strong> – the inference function to use during RunInference.
Default is _run_inference_torch_keyed_tensor or
_run_inference_tensorflow_keyed_tensor depending on the input type.</li>
<li><strong>load_model_args</strong> (<em>Dict</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.12)"><em>str</em></a><em>, </em><em>Any</em><em>]</em>) – (Optional) Keyword arguments to provide
load options while loading models from Hugging Face Hub.
Defaults to None.</li>
<li><strong>inference_args</strong> (<em>Dict</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.12)"><em>str</em></a><em>, </em><em>Any</em><em>]</em>) – (Optional) Non-batchable arguments
required as inputs to the model’s inference function. Unlike Tensors
in <cite>batch</cite>, these parameters will not be dynamically batched.
Defaults to None.</li>
<li><strong>min_batch_size</strong> – the minimum batch size to use when batching inputs.</li>
<li><strong>max_batch_size</strong> – the maximum batch size to use when batching inputs.</li>
<li><strong>large_model</strong> – set to true if your model is large enough to run into
memory pressure if you load multiple copies. Given a model that
consumes N memory and a machine with W cores and M memory, you should
set this to True if N*W &gt; M.</li>
<li><strong>kwargs</strong> – ‘env_vars’ can be used to set environment variables
before loading the model.</li>
</ul>
</td>
</tr>
</tbody>
</table>
<p><strong>Supported Versions:</strong> HuggingFaceModelHandler supports
transformers&gt;=4.18.0.</p>
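<p>A fuller end-to-end sketch built from the parameters above (not taken from
the Beam codebase; the model name, sentence, and padding length are
illustrative, and it assumes the <cite>transformers</cite> and <cite>torch</cite>
packages are installed):</p>
<div class="highlight-python notranslate"><div class="highlight"><pre>import apache_beam as beam
from apache_beam.ml.inference.base import RunInference
from apache_beam.ml.inference.huggingface_inference import (
    HuggingFaceModelHandlerKeyedTensor)
from transformers import AutoModelForMaskedLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

def tokenize(sentence: str):
  # Produce a dict of keyed tensors such as 'input_ids' and 'attention_mask'.
  # Padding to a fixed length keeps the tensors stackable across a batch.
  tokens = tokenizer(
      sentence, return_tensors="pt", padding="max_length", max_length=32)
  return {key: value.squeeze(0) for key, value in tokens.items()}

model_handler = HuggingFaceModelHandlerKeyedTensor(
    model_uri="bert-base-uncased",
    model_class=AutoModelForMaskedLM,
    framework="pt",
    max_batch_size=8)

with beam.Pipeline() as p:
  _ = (
      p
      | beam.Create(["Apache Beam is a unified [MASK] processing model."])
      | beam.Map(tokenize)
      | RunInference(model_handler)
      | beam.Map(print))
</pre></div>
</div>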
<dl class="method">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerKeyedTensor.load_model">
<code class="descname">load_model</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFaceModelHandlerKeyedTensor.load_model"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerKeyedTensor.load_model" title="Permalink to this definition"></a></dt>
<dd><p>Loads and initializes the model for processing.</p>
</dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerKeyedTensor.run_inference">
<code class="descname">run_inference</code><span class="sig-paren">(</span><em>batch: Sequence[Dict[str, Union[&lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f50f12cf6d0&gt;, &lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f50f12cf340&gt;]]], model: Union[&lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f50f10faa60&gt;, &lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f50f10fa190&gt;], inference_args: Optional[Dict[str, Any]] = None</em><span class="sig-paren">)</span> &#x2192; Iterable[apache_beam.ml.inference.base.PredictionResult]<a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFaceModelHandlerKeyedTensor.run_inference"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerKeyedTensor.run_inference" title="Permalink to this definition"></a></dt>
<dd><p>Runs inference on a batch of keyed tensors and returns an Iterable of
tensor predictions.</p>
<p>This method stacks the list of Tensors in a vectorized format to optimize
the inference call.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>batch</strong> – A sequence of Keyed Tensors. These Tensors should be batchable,
as this method will call <cite>tf.stack()</cite>/<cite>torch.stack()</cite> and pass in
batched Tensors with dimensions (batch_size, n_features, etc.) into
the model’s predict() function.</li>
<li><strong>model</strong> – A TensorFlow/PyTorch model.</li>
<li><strong>inference_args</strong> – Non-batchable arguments required as inputs to the
model’s inference function. Unlike Tensors in <cite>batch</cite>,
these parameters will not be dynamically batched.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first last">An Iterable of type PredictionResult.</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerKeyedTensor.update_model_path">
<code class="descname">update_model_path</code><span class="sig-paren">(</span><em>model_path: Optional[str] = None</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFaceModelHandlerKeyedTensor.update_model_path"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerKeyedTensor.update_model_path" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerKeyedTensor.get_num_bytes">
<code class="descname">get_num_bytes</code><span class="sig-paren">(</span><em>batch: Sequence[Union[&lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f50f0f62550&gt;, &lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f50f0f62f10&gt;]]</em><span class="sig-paren">)</span> &#x2192; int<a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFaceModelHandlerKeyedTensor.get_num_bytes"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerKeyedTensor.get_num_bytes" title="Permalink to this definition"></a></dt>
<dd><table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Returns:</th><td class="field-body">The number of bytes of data for the Tensors batch.</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerKeyedTensor.batch_elements_kwargs">
<code class="descname">batch_elements_kwargs</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFaceModelHandlerKeyedTensor.batch_elements_kwargs"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerKeyedTensor.batch_elements_kwargs" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerKeyedTensor.share_model_across_processes">
<code class="descname">share_model_across_processes</code><span class="sig-paren">(</span><span class="sig-paren">)</span> &#x2192; bool<a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFaceModelHandlerKeyedTensor.share_model_across_processes"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerKeyedTensor.share_model_across_processes" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerKeyedTensor.get_metrics_namespace">
<code class="descname">get_metrics_namespace</code><span class="sig-paren">(</span><span class="sig-paren">)</span> &#x2192; str<a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFaceModelHandlerKeyedTensor.get_metrics_namespace"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerKeyedTensor.get_metrics_namespace" title="Permalink to this definition"></a></dt>
<dd><table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Returns:</th><td class="field-body">A namespace for metrics collected by the RunInference transform.</td>
</tr>
</tbody>
</table>
</dd></dl>
</dd></dl>
<dl class="class">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerTensor">
<em class="property">class </em><code class="descclassname">apache_beam.ml.inference.huggingface_inference.</code><code class="descname">HuggingFaceModelHandlerTensor</code><span class="sig-paren">(</span><em>model_uri: str, model_class: Union[&lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f50f10faa60&gt;, &lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f50f10fa190&gt;], device: str = 'CPU', *, inference_fn: Optional[Callable[[...], Iterable[apache_beam.ml.inference.base.PredictionResult]]] = None, load_model_args: Optional[Dict[str, Any]] = None, inference_args: Optional[Dict[str, Any]] = None, min_batch_size: Optional[int] = None, max_batch_size: Optional[int] = None, large_model: bool = False, **kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFaceModelHandlerTensor"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerTensor" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="apache_beam.ml.inference.base.html#apache_beam.ml.inference.base.ModelHandler" title="apache_beam.ml.inference.base.ModelHandler"><code class="xref py py-class docutils literal notranslate"><span class="pre">apache_beam.ml.inference.base.ModelHandler</span></code></a></p>
<p>Implementation of the ModelHandler interface for HuggingFace with
Tensors for PyTorch/Tensorflow backend.</p>
<p>Depending on the type of tensors, the model framework is determined
automatically.</p>
<dl class="docutils">
<dt>Example Usage:</dt>
<dd><pre class="first last literal-block">pcoll | RunInference(HuggingFaceModelHandlerTensor(
    model_uri="bert-base-uncased", model_class=AutoModelForMaskedLM))</pre>
</dd>
</dl>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>model_uri</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.12)"><em>str</em></a>) – path to the pretrained model on the Hugging Face
Models Hub.</li>
<li><strong>model_class</strong> – model class with which to load the model from model_uri.</li>
<li><strong>device</strong> – For torch tensors, the device on which to
run the model. Defaults to CPU.</li>
<li><strong>inference_fn</strong> – the inference function to use during RunInference.
Default is _run_inference_torch_keyed_tensor or
_run_inference_tensorflow_keyed_tensor depending on the input type.</li>
<li><strong>load_model_args</strong> (<em>Dict</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.12)"><em>str</em></a><em>, </em><em>Any</em><em>]</em>) – (Optional) keyword arguments to provide
load options while loading models from Hugging Face Hub.
Defaults to None.</li>
<li><strong>inference_args</strong> (<em>Dict</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.12)"><em>str</em></a><em>, </em><em>Any</em><em>]</em>) – (Optional) Non-batchable arguments
required as inputs to the model’s inference function. Unlike Tensors
in <cite>batch</cite>, these parameters will not be dynamically batched.
Defaults to None.</li>
<li><strong>min_batch_size</strong> – the minimum batch size to use when batching inputs.</li>
<li><strong>max_batch_size</strong> – the maximum batch size to use when batching inputs.</li>
<li><strong>large_model</strong> – set to true if your model is large enough to run into
memory pressure if you load multiple copies. Given a model that
consumes N memory and a machine with W cores and M memory, you should
set this to True if N*W &gt; M.</li>
<li><strong>kwargs</strong> – ‘env_vars’ can be used to set environment variables
before loading the model.</li>
</ul>
</td>
</tr>
</tbody>
</table>
<p><strong>Supported Versions:</strong> HuggingFaceModelHandler supports
transformers&gt;=4.18.0.</p>
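<p>A minimal sketch of the automatic framework detection (not taken from the
Beam codebase; the model name and input are illustrative, and it relies on the
fact that most transformers models accept <cite>input_ids</cite> as their first
positional argument, which is how the stacked batch tensor is passed):</p>
<div class="highlight-python notranslate"><div class="highlight"><pre>import apache_beam as beam
from apache_beam.ml.inference.base import RunInference
from apache_beam.ml.inference.huggingface_inference import (
    HuggingFaceModelHandlerTensor)
from transformers import AutoModelForMaskedLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

def to_input_ids(sentence: str):
  # A single torch.Tensor of token ids; because the elements are torch
  # tensors, the handler selects the PyTorch inference path automatically.
  return tokenizer(
      sentence, return_tensors="pt", padding="max_length",
      max_length=32)["input_ids"].squeeze(0)

model_handler = HuggingFaceModelHandlerTensor(
    model_uri="bert-base-uncased",
    model_class=AutoModelForMaskedLM)

with beam.Pipeline() as p:
  _ = (
      p
      | beam.Create(["Apache Beam is an open source [MASK] model."])
      | beam.Map(to_input_ids)
      | RunInference(model_handler)
      | beam.Map(print))
</pre></div>
</div>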
<dl class="method">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerTensor.load_model">
<code class="descname">load_model</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFaceModelHandlerTensor.load_model"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerTensor.load_model" title="Permalink to this definition"></a></dt>
<dd><p>Loads and initializes the model for processing.</p>
</dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerTensor.run_inference">
<code class="descname">run_inference</code><span class="sig-paren">(</span><em>batch: Sequence[Union[&lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f50f13fda30&gt;, &lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f50f13fdd00&gt;]], model: Union[&lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f50f10faa60&gt;, &lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f50f10fa190&gt;], inference_args: Optional[Dict[str, Any]] = None</em><span class="sig-paren">)</span> &#x2192; Iterable[apache_beam.ml.inference.base.PredictionResult]<a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFaceModelHandlerTensor.run_inference"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerTensor.run_inference" title="Permalink to this definition"></a></dt>
<dd><p>Runs inference on a batch of tensors and returns an Iterable of
tensor predictions.</p>
<p>This method stacks the list of Tensors in a vectorized format to optimize
the inference call.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>batch</strong> – A sequence of Tensors. These Tensors should be batchable, as
this method will call <cite>tf.stack()</cite>/<cite>torch.stack()</cite> and pass in
batched Tensors with dimensions (batch_size, n_features, etc.)
into the model’s predict() function.</li>
<li><strong>model</strong> – A TensorFlow/PyTorch model.</li>
<li><strong>inference_args</strong> (<em>Dict</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.12)"><em>str</em></a><em>, </em><em>Any</em><em>]</em>) – Non-batchable arguments required as
inputs to the model’s inference function. Unlike Tensors in <cite>batch</cite>,
these parameters will not be dynamically batched.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first last">An Iterable of type PredictionResult.</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerTensor.update_model_path">
<code class="descname">update_model_path</code><span class="sig-paren">(</span><em>model_path: Optional[str] = None</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFaceModelHandlerTensor.update_model_path"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerTensor.update_model_path" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerTensor.get_num_bytes">
<code class="descname">get_num_bytes</code><span class="sig-paren">(</span><em>batch: Sequence[Union[&lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f50f13fd820&gt;, &lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f50f13fdbb0&gt;]]</em><span class="sig-paren">)</span> &#x2192; int<a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFaceModelHandlerTensor.get_num_bytes"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerTensor.get_num_bytes" title="Permalink to this definition"></a></dt>
<dd><table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Returns:</th><td class="field-body">The number of bytes of data for the Tensors batch.</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerTensor.batch_elements_kwargs">
<code class="descname">batch_elements_kwargs</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFaceModelHandlerTensor.batch_elements_kwargs"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerTensor.batch_elements_kwargs" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerTensor.share_model_across_processes">
<code class="descname">share_model_across_processes</code><span class="sig-paren">(</span><span class="sig-paren">)</span> &#x2192; bool<a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFaceModelHandlerTensor.share_model_across_processes"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerTensor.share_model_across_processes" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerTensor.get_metrics_namespace">
<code class="descname">get_metrics_namespace</code><span class="sig-paren">(</span><span class="sig-paren">)</span> &#x2192; str<a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFaceModelHandlerTensor.get_metrics_namespace"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFaceModelHandlerTensor.get_metrics_namespace" title="Permalink to this definition"></a></dt>
<dd><table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Returns:</th><td class="field-body">A namespace for metrics collected by the RunInference transform.</td>
</tr>
</tbody>
</table>
</dd></dl>
</dd></dl>
<dl class="class">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFacePipelineModelHandler">
<em class="property">class </em><code class="descclassname">apache_beam.ml.inference.huggingface_inference.</code><code class="descname">HuggingFacePipelineModelHandler</code><span class="sig-paren">(</span><em>task: Union[str, apache_beam.ml.inference.huggingface_inference.PipelineTask] = '', model: str = '', *, inference_fn: Callable[[Sequence[str], &lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f50f10fa820&gt;, Optional[Dict[str, Any]]], Iterable[apache_beam.ml.inference.base.PredictionResult]] = &lt;function _default_pipeline_inference_fn&gt;, load_pipeline_args: Optional[Dict[str, Any]] = None, inference_args: Optional[Dict[str, Any]] = None, min_batch_size: Optional[int] = None, max_batch_size: Optional[int] = None, large_model: bool = False, **kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFacePipelineModelHandler"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFacePipelineModelHandler" title="Permalink to this definition"></a></dt>
<dd><p>Bases: <a class="reference internal" href="apache_beam.ml.inference.base.html#apache_beam.ml.inference.base.ModelHandler" title="apache_beam.ml.inference.base.ModelHandler"><code class="xref py py-class docutils literal notranslate"><span class="pre">apache_beam.ml.inference.base.ModelHandler</span></code></a></p>
<p>Implementation of the ModelHandler interface for Hugging Face Pipelines.</p>
<p><strong>Note:</strong> To specify which device to use (CPU/GPU),
pass the appropriate key-value pair in <cite>load_pipeline_args</cite>, as you
would for a regular Hugging Face pipeline. Example: <cite>load_pipeline_args={'device': 0}</cite>.</p>
<dl class="docutils">
<dt>Example Usage:</dt>
<dd><pre class="first last literal-block">pcoll | RunInference(HuggingFacePipelineModelHandler(
    task="fill-mask"))</pre>
</dd>
</dl>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>task</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.12)"><em>str</em></a><em> or </em><a class="reference external" href="https://docs.python.org/3/library/enum.html#enum.Enum" title="(in Python v3.12)"><em>enum.Enum</em></a>) – task supported by Hugging Face Pipelines.
Accepts either a task string or an enum.Enum from PipelineTask.</li>
<li><strong>model</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.12)"><em>str</em></a>) – <p>the <em>model-id</em> of a pretrained model on the Hugging Face Models Hub,
used to run a custom model for the chosen task. If the <cite>model</cite> already
defines the task, there is no need to specify the <cite>task</cite> parameter.
Pass the <em>model-id</em> string here, not an actual model object.
Model-specific kwargs for <cite>from_pretrained(…, **model_kwargs)</cite> can be
specified with <cite>model_kwargs</cite> using <cite>load_pipeline_args</cite>.</p>
<dl class="docutils">
<dt>Example Usage:</dt>
<dd><pre class="first last literal-block">model_handler = HuggingFacePipelineModelHandler(
    task="text-generation", model="meta-llama/Llama-2-7b-hf",
    load_pipeline_args={'model_kwargs': {'quantization_map': config}})</pre>
</dd>
</dl>
</li>
<li><strong>inference_fn</strong> – the inference function to use during RunInference.
Default is _default_pipeline_inference_fn.</li>
<li><strong>load_pipeline_args</strong> (<em>Dict</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.12)"><em>str</em></a><em>, </em><em>Any</em><em>]</em>) – keyword arguments to provide load
options while loading pipelines from Hugging Face. Defaults to None.</li>
<li><strong>inference_args</strong> (<em>Dict</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.12)"><em>str</em></a><em>, </em><em>Any</em><em>]</em>) – Non-batchable arguments
required as inputs to the model’s inference function.
Defaults to None.</li>
<li><strong>min_batch_size</strong> – the minimum batch size to use when batching inputs.</li>
<li><strong>max_batch_size</strong> – the maximum batch size to use when batching inputs.</li>
<li><strong>large_model</strong> – set to true if your model is large enough to run into
memory pressure if you load multiple copies. Given a model that
consumes N memory and a machine with W cores and M memory, you should
set this to True if N*W &gt; M.</li>
<li><strong>kwargs</strong> – ‘env_vars’ can be used to set environment variables
before loading the model.</li>
</ul>
</td>
</tr>
</tbody>
</table>
<p><strong>Supported Versions:</strong> HuggingFacePipelineModelHandler supports
transformers&gt;=4.18.0.</p>
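<p>A minimal sketch combining the parameters above (not taken from the Beam
codebase; the model, prompt, and generation arguments are illustrative; in
Hugging Face pipelines <cite>device=-1</cite> selects the CPU and
<cite>device=0</cite> the first GPU):</p>
<div class="highlight-python notranslate"><div class="highlight"><pre>import apache_beam as beam
from apache_beam.ml.inference.base import RunInference
from apache_beam.ml.inference.huggingface_inference import (
    HuggingFacePipelineModelHandler)

model_handler = HuggingFacePipelineModelHandler(
    task="text-generation",
    model="gpt2",
    load_pipeline_args={"device": -1},      # run on CPU
    inference_args={"max_new_tokens": 20})  # forwarded to each pipeline call

with beam.Pipeline() as p:
  _ = (
      p
      | beam.Create(["Apache Beam is"])
      | RunInference(model_handler)
      | beam.Map(print))
</pre></div>
</div>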
<dl class="method">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFacePipelineModelHandler.load_model">
<code class="descname">load_model</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFacePipelineModelHandler.load_model"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFacePipelineModelHandler.load_model" title="Permalink to this definition"></a></dt>
<dd><p>Loads and initializes the pipeline for processing.</p>
</dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFacePipelineModelHandler.run_inference">
<code class="descname">run_inference</code><span class="sig-paren">(</span><em>batch: Sequence[str], pipeline: &lt;sphinx.ext.autodoc.importer._MockObject object at 0x7f50f10fa820&gt;, inference_args: Optional[Dict[str, Any]] = None</em><span class="sig-paren">)</span> &#x2192; Iterable[apache_beam.ml.inference.base.PredictionResult]<a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFacePipelineModelHandler.run_inference"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFacePipelineModelHandler.run_inference" title="Permalink to this definition"></a></dt>
<dd><p>Runs inference on a batch of examples passed as string resources.
These can be sentences, or paths to image or audio files.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>batch</strong> – A sequence of string resources.</li>
<li><strong>pipeline</strong> – A Hugging Face Pipeline.</li>
<li><strong>inference_args</strong> – Non-batchable arguments required as inputs to the model’s
inference function.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first last">An Iterable of type PredictionResult.</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFacePipelineModelHandler.update_model_path">
<code class="descname">update_model_path</code><span class="sig-paren">(</span><em>model_path: Optional[str] = None</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFacePipelineModelHandler.update_model_path"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFacePipelineModelHandler.update_model_path" title="Permalink to this definition"></a></dt>
<dd><p>Updates the pretrained model used by the Hugging Face Pipeline task.
Make sure that the new model performs the same task as the initial model.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><strong>model_path</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.12)"><em>str</em></a>) – (Optional) Path to the new trained model
from Hugging Face. Defaults to None.</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFacePipelineModelHandler.get_num_bytes">
<code class="descname">get_num_bytes</code><span class="sig-paren">(</span><em>batch: Sequence[str]</em><span class="sig-paren">)</span> &#x2192; int<a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFacePipelineModelHandler.get_num_bytes"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFacePipelineModelHandler.get_num_bytes" title="Permalink to this definition"></a></dt>
<dd><table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Returns:</th><td class="field-body">The number of bytes of input batch elements.</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFacePipelineModelHandler.batch_elements_kwargs">
<code class="descname">batch_elements_kwargs</code><span class="sig-paren">(</span><span class="sig-paren">)</span><a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFacePipelineModelHandler.batch_elements_kwargs"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFacePipelineModelHandler.batch_elements_kwargs" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFacePipelineModelHandler.share_model_across_processes">
<code class="descname">share_model_across_processes</code><span class="sig-paren">(</span><span class="sig-paren">)</span> &#x2192; bool<a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFacePipelineModelHandler.share_model_across_processes"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFacePipelineModelHandler.share_model_across_processes" title="Permalink to this definition"></a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="apache_beam.ml.inference.huggingface_inference.HuggingFacePipelineModelHandler.get_metrics_namespace">
<code class="descname">get_metrics_namespace</code><span class="sig-paren">(</span><span class="sig-paren">)</span> &#x2192; str<a class="reference internal" href="_modules/apache_beam/ml/inference/huggingface_inference.html#HuggingFacePipelineModelHandler.get_metrics_namespace"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#apache_beam.ml.inference.huggingface_inference.HuggingFacePipelineModelHandler.get_metrics_namespace" title="Permalink to this definition"></a></dt>
<dd><table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Returns:</th><td class="field-body">A namespace for metrics collected by the RunInference transform.</td>
</tr>
</tbody>
</table>
</dd></dl>
</dd></dl>
</div>
</div>
</div>
</div>
</div>
</section>
</div>
<script type="text/javascript">
jQuery(function () {
SphinxRtdTheme.Navigation.enable(true);
});
</script>
</body>
</html>