<!DOCTYPE html>
<!---
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<html lang=" en"><head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link href="/versions/master/assets/img/mxnet-icon.png" rel="icon" type="image/png"><!-- Begin Jekyll SEO tag v2.6.1 -->
<title>Multi Threaded Inference | Apache MXNet</title>
<meta name="generator" content="Jekyll v4.0.0" />
<meta property="og:title" content="Multi Threaded Inference" />
<meta property="og:locale" content="en_US" />
<meta name="description" content="A flexible and efficient library for deep learning." />
<meta property="og:description" content="A flexible and efficient library for deep learning." />
<link rel="canonical" href="https://mxnet.apache.org/versions/master/api/cpp/docs/tutorials/multi_threaded_inference.html" />
<meta property="og:url" content="https://mxnet.apache.org/versions/master/api/cpp/docs/tutorials/multi_threaded_inference.html" />
<meta property="og:site_name" content="Apache MXNet" />
<script type="application/ld+json">
{"url":"https://mxnet.apache.org/versions/master/api/cpp/docs/tutorials/multi_threaded_inference.html","headline":"Multi Threaded Inference","description":"A flexible and efficient library for deep learning.","@type":"WebPage","@context":"https://schema.org"}</script>
<!-- End Jekyll SEO tag -->
<link rel="stylesheet" href="/versions/master/assets/docsearch.min.css" /><link rel="stylesheet" href="/versions/master/assets/main.css"><link type="application/atom+xml" rel="alternate" href="https://mxnet.apache.org/versions/master/feed.xml" title="Apache MXNet" /><!-- Matomo -->
<script>
var _paq = window._paq = window._paq || [];
/* tracker methods like "setCustomDimension" should be called before "trackPageView" */
/* We explicitly disable cookie tracking to avoid privacy issues */
_paq.push(['disableCookies']);
_paq.push(['trackPageView']);
_paq.push(['enableLinkTracking']);
(function() {
var u="https://analytics.apache.org/";
_paq.push(['setTrackerUrl', u+'matomo.php']);
_paq.push(['setSiteId', '23']);
var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
g.async=true; g.src=u+'matomo.js'; s.parentNode.insertBefore(g,s);
})();
</script>
<!-- End Matomo Code -->
<script src="/versions/master/assets/js/jquery-3.3.1.min.js"></script>
<script src="/versions/master/assets/js/docsearch.min.js"></script><script src="/versions/master/assets/js/globalSearch.js" defer></script>
<script src="/versions/master/assets/js/clipboard.js" defer></script>
<script src="/versions/master/assets/js/copycode.js" defer></script></head>
<body><header class="site-header" role="banner">
<script>
$(document).ready(function () {
// HEADER OPACITY LOGIC
function opacity_header() {
var value = "rgba(4,140,204," + ($(window).scrollTop() / 300 + 0.4) + ")"
$('.site-header').css("background-color", value)
}
$(window).scroll(function () {
opacity_header()
})
opacity_header();
// MENU SELECTOR LOGIC
$('.page-link').each( function () {
if (window.location.href.includes(this.href)) {
$(this).addClass("page-current");
}
});
})
</script>
<div class="wrapper">
<a class="site-title" rel="author" href="/versions/master/"><img
src="/versions/master/assets/img/mxnet_logo.png" class="site-header-logo"></a>
<nav class="site-nav">
<input type="checkbox" id="nav-trigger" class="nav-trigger"/>
<label for="nav-trigger">
<span class="menu-icon">
<svg viewBox="0 0 18 15" width="18px" height="15px">
<path d="M18,1.484c0,0.82-0.665,1.484-1.484,1.484H1.484C0.665,2.969,0,2.304,0,1.484l0,0C0,0.665,0.665,0,1.484,0 h15.032C17.335,0,18,0.665,18,1.484L18,1.484z M18,7.516C18,8.335,17.335,9,16.516,9H1.484C0.665,9,0,8.335,0,7.516l0,0 c0-0.82,0.665-1.484,1.484-1.484h15.032C17.335,6.031,18,6.696,18,7.516L18,7.516z M18,13.516C18,14.335,17.335,15,16.516,15H1.484 C0.665,15,0,14.335,0,13.516l0,0c0-0.82,0.665-1.483,1.484-1.483h15.032C17.335,12.031,18,12.695,18,13.516L18,13.516z"/>
</svg>
</span>
</label>
<div class="gs-search-border">
<div id="gs-search-icon"></div>
<form id="global-search-form">
<input id="global-search" type="text" title="Search" placeholder="Search" />
<div id="global-search-dropdown-container">
<button class="gs-current-version btn" type="button" data-toggle="dropdown">
<span id="gs-current-version-label">master</span>
<svg class="gs-dropdown-caret" viewBox="0 0 32 32" class="icon icon-caret-bottom" aria-hidden="true">
<path class="dropdown-caret-path" d="M24 11.305l-7.997 11.39L8 11.305z"></path>
</svg>
</button>
<ul class="gs-opt-group gs-version-dropdown">
<li class="gs-opt gs-versions active">master</li>
<li class="gs-opt gs-versions">1.9.1</li>
<li class="gs-opt gs-versions">1.8.0</li>
<li class="gs-opt gs-versions">1.7.0</li>
<li class="gs-opt gs-versions">1.6.0</li>
<li class="gs-opt gs-versions">1.5.0</li>
<li class="gs-opt gs-versions">1.4.1</li>
<li class="gs-opt gs-versions">1.3.1</li>
<li class="gs-opt gs-versions">1.2.1</li>
<li class="gs-opt gs-versions">1.1.0</li>
<li class="gs-opt gs-versions">1.0.0</li>
<li class="gs-opt gs-versions">0.12.1</li>
<li class="gs-opt gs-versions">0.11.0</li>
</ul>
</div>
<span id="global-search-close">x</span>
</form>
</div>
<div class="trigger">
<div id="global-search-mobile-border">
<div id="gs-search-icon-mobile"></div>
<input id="global-search-mobile" placeholder="Search..." type="text"/>
<div id="global-search-dropdown-container-mobile">
<button class="gs-current-version-mobile btn" type="button" data-toggle="dropdown">
<svg class="gs-dropdown-caret" viewBox="0 0 32 32" class="icon icon-caret-bottom" aria-hidden="true">
<path class="dropdown-caret-path" d="M24 11.305l-7.997 11.39L8 11.305z"></path>
</svg>
</button>
<ul class="gs-opt-group gs-version-dropdown-mobile">
<li class="gs-opt gs-versions active">master</li>
<li class="gs-opt gs-versions">1.9.1</li>
<li class="gs-opt gs-versions">1.8.0</li>
<li class="gs-opt gs-versions">1.7.0</li>
<li class="gs-opt gs-versions">1.6.0</li>
<li class="gs-opt gs-versions">1.5.0</li>
<li class="gs-opt gs-versions">1.4.1</li>
<li class="gs-opt gs-versions">1.3.1</li>
<li class="gs-opt gs-versions">1.2.1</li>
<li class="gs-opt gs-versions">1.1.0</li>
<li class="gs-opt gs-versions">1.0.0</li>
<li class="gs-opt gs-versions">0.12.1</li>
<li class="gs-opt gs-versions">0.11.0</li>
</ul>
</div>
</div>
<a class="page-link" href="/versions/master/get_started">Get Started</a>
<a class="page-link" href="/versions/master/features">Features</a>
<a class="page-link" href="/versions/master/ecosystem">Ecosystem</a>
<a class="page-link" href="/versions/master/api">Docs & Tutorials</a>
<a class="page-link" href="/versions/master/trusted_by">Trusted By</a>
<a class="page-link" href="https://github.com/apache/mxnet">GitHub</a>
<div class="dropdown" style="min-width:100px">
<span class="dropdown-header">Apache
<svg class="dropdown-caret" viewBox="0 0 32 32" class="icon icon-caret-bottom" aria-hidden="true"><path class="dropdown-caret-path" d="M24 11.305l-7.997 11.39L8 11.305z"></path></svg>
</span>
<div class="dropdown-content" style="min-width:250px">
<a href="https://www.apache.org/foundation/">Apache Software Foundation</a>
<a href="https://www.apache.org/licenses/">License</a>
<a href="/versions/master/api/faq/security.html">Security</a>
<a href="https://privacy.apache.org/policies/privacy-policy-public.html">Privacy</a>
<a href="https://www.apache.org/events/current-event">Events</a>
<a href="https://www.apache.org/foundation/sponsorship.html">Sponsorship</a>
<a href="https://www.apache.org/foundation/thanks.html">Thanks</a>
</div>
</div>
<div class="dropdown">
<span class="dropdown-header">master
<svg class="dropdown-caret" viewBox="0 0 32 32" class="icon icon-caret-bottom" aria-hidden="true"><path class="dropdown-caret-path" d="M24 11.305l-7.997 11.39L8 11.305z"></path></svg>
</span>
<div class="dropdown-content">
<a class="dropdown-option-active" href="/">master</a>
<a href="/versions/1.9.1/">1.9.1</a>
<a href="/versions/1.8.0/">1.8.0</a>
<a href="/versions/1.7.0/">1.7.0</a>
<a href="/versions/1.6.0/">1.6.0</a>
<a href="/versions/1.5.0/">1.5.0</a>
<a href="/versions/1.4.1/">1.4.1</a>
<a href="/versions/1.3.1/">1.3.1</a>
<a href="/versions/1.2.1/">1.2.1</a>
<a href="/versions/1.1.0/">1.1.0</a>
<a href="/versions/1.0.0/">1.0.0</a>
<a href="/versions/0.12.1/">0.12.1</a>
<a href="/versions/0.11.0/">0.11.0</a>
</div>
</div>
</div>
</nav>
</div>
</header>
<main class="page-content" aria-label="Content">
<script>
</script>
<article class="post">
<header class="post-header wrapper">
<h1 class="post-title">Multi Threaded Inference</h1>
<h3></h3><a style="float:left; margin-top:20px" href="/versions/master/get_started" class="btn btn-action">Get Started
<span class="span-accented"></span></a></header>
<div class="post-content">
<div class="wrapper">
<div class="row">
<div class="col-3 docs-side-bar">
<div class="docs-card docs-side">
<ul>
<div class="docs-action-btn">
<a href="/versions/master/api/cpp.html"> <img src="/versions/master/assets/img/compass.svg"
class="docs-logo-docs">C/C++ Guide <span
class="span-accented"></span></a>
</div>
<div class="docs-action-btn">
<a href="/versions/master/api/cpp/docs/tutorials"> <img
src="/versions/master/assets/img/video-tutorial.svg" class="docs-logo-docs">C/C++
Tutorials <span class="span-accented"></span></a>
</div>
<div class="docs-action-btn">
<a href="/versions/master/api/cpp/docs/api"> <img src="/versions/master/assets/img/api.svg"
class="docs-logo-docs">C/C++ API Reference
<span class="span-accented"></span></a>
</div>
<!-- Let's show the list of tutorials -->
<br>
<h3>Tutorials</h3>
<li><a href="/versions/master/api/cpp/docs/tutorials/basics.html">Basics</a></li>
<li><a href="/versions/master/api/cpp/docs/tutorials/multi_threaded_inference.html">Multi Threaded Inference</a></li>
<li><a href="/versions/master/api/cpp/docs/tutorials/cpp_inference.html">C++ API inference tutorial</a></li>
<li><a href="/versions/master/api/cpp/docs/tutorials/subgraph_api.html">Subgraph API</a></li>
</ul>
</div>
</div>
<div class="col-9">
<!--- Licensed to the Apache Software Foundation (ASF) under one -->
<!--- or more contributor license agreements. See the NOTICE file -->
<!--- distributed with this work for additional information -->
<!--- regarding copyright ownership. The ASF licenses this file -->
<!--- to you under the Apache License, Version 2.0 (the -->
<!--- "License"); you may not use this file except in compliance -->
<!--- with the License. You may obtain a copy of the License at -->
<!--- http://www.apache.org/licenses/LICENSE-2.0 -->
<!--- Unless required by applicable law or agreed to in writing, -->
<!--- software distributed under the License is distributed on an -->
<!--- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -->
<!--- KIND, either express or implied. See the License for the -->
<!--- specific language governing permissions and limitations -->
<!--- under the License. -->
<h1 id="multi-threaded-inference-api">Multi Threaded Inference API</h1>
<p>A long-standing request from MXNet users has been to invoke parallel inference on a model from multiple threads while sharing the parameters.
With this use case in mind, the thread-safe version of CachedOp was added to give MXNet users a way to run multi-threaded inference.
This document attempts to do the following:</p>
<ol>
<li>Discuss the current state of thread safety in MXNet</li>
<li>Explain how to use the C API and the thread-safe version of CachedOp, along with the CPP package, to achieve multi-threaded inference. This is useful for end users as well as developers of the different language frontends</li>
<li>Discuss the limitations of the above approach</li>
<li>Outline future work</li>
</ol>
<h2 id="current-state-of-thread-safety-in-mxnet">Current state of Thread Safety in MXNet</h2>
<p>Examining the current state of thread safety in MXNet, we can arrive at the following conclusions:</p>
<ol>
<li>The MXNet dependency engine is thread safe (except for WaitToRead invoked inside a spawned thread; see the Current Limitations section)</li>
<li>The Graph Executor, which is the backend for the Module/Symbolic/C Predict APIs, is not thread safe</li>
<li>CachedOp (the Gluon backend) is not thread safe</li>
</ol>
<p>CachedOpThreadSafe and the corresponding C APIs were added to address point 3 above and provide a way
for MXNet users to run multi-threaded inference.</p>
<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>/*!
* \brief create cached operator, allows to choose thread_safe version
* of cachedop
*/
MXNET_DLL int MXCreateCachedOp(SymbolHandle handle,
                               int num_flags,
                               const char** keys,
                               const char** vals,
                               CachedOpHandle *out,
                               bool thread_safe DEFAULT(false));
</code></pre></div></div>
<h2 id="multithreaded-inference-in-mxnet-with-c-api-and-cpp-package">Multithreaded inference in MXNet with C API and CPP Package</h2>
<h3 id="prerequisites">Prerequisites</h3>
<p>To complete this tutorial you need to:</p>
<ul>
<li>Learn the basics of the <a href="/api/cpp">MXNet C++ API</a></li>
<li>Build MXNet from source with make/cmake</li>
<li>Build the multi-threaded inference example</li>
</ul>
<h3 id="setup-the-mxnet-c-api">Setup the MXNet C++ API</h3>
<p>To use the C++ API in MXNet, you need to build MXNet from source with the C++ package. Please follow the <a href="/get_started/build_from_source.html">build from source guide</a> and the <a href="/api/cpp.html">C++ Package documentation</a>.
In summary, you need to build MXNet from source with the <code class="highlighter-rouge">USE_CPP_PACKAGE</code> flag set to 1.
This example also requires a build with CUDA and CUDNN.</p>
<h3 id="get-the-example">Get the example</h3>
<p>If you have built MXNet from source with cmake, then do the following:</p>
<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span><span class="nb">cp </span>build/cpp-package/example/multi_threaded_inference <span class="nb">.</span>
</code></pre></div></div>
<h3 id="run-multi-threaded-inference-example">Run multi threaded inference example</h3>
<p>The example is tested with models such as <code class="highlighter-rouge">imagenet1k-inception-bn</code>, <code class="highlighter-rouge">imagenet1k-resnet-50</code>,
<code class="highlighter-rouge">imagenet1k-resnet-152</code>, and <code class="highlighter-rouge">imagenet1k-resnet-18</code>.</p>
<p>To run the multi threaded inference example:</p>
<p>First export <code class="highlighter-rouge">LD_LIBRARY_PATH</code>:</p>
<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span><span class="nb">export </span><span class="nv">LD_LIBRARY_PATH</span><span class="o">=</span>&lt;MXNET_LIB_DIR&gt;:<span class="nv">$LD_LIBRARY_PATH</span>
</code></pre></div></div>
<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>./multi_threaded_inference <span class="o">[</span>model_name] <span class="o">[</span>is_gpu] <span class="o">[</span>file_names]
</code></pre></div></div>
<p>For example:</p>
<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code>./multi_threaded_inference imagenet1k-inception-bn 1 grace_hopper.jpg dog.jpg
</code></pre></div></div>
<p>The above command spawns two threads that share the same cached op and parameters, and runs inference on the GPU. It returns the inference results in the order in which the files are provided.</p>
<p>NOTE: This example demonstrates multi-threaded inference with a cached op. The inference results are meaningful only for specific models (e.g. imagenet1k-inception-bn); for other models the results may not be very accurate because of the different preprocessing steps they require.</p>
<h3 id="code-walkthrough-multi-threaded-inference-with-cachedop">Code walkthrough multi-threaded inference with CachedOp</h3>
<p>The multi-threaded inference example (<code class="highlighter-rouge">multi_threaded_inference.cc</code>) involves the following steps:</p>
<ol>
<li>Parse arguments and load input image into ndarray</li>
<li>Prepare input data and load parameters, copying data to a specific context</li>
<li>Prepare arguments to pass to the CachedOp and call the C API to <strong>create the cached op</strong></li>
<li>Prepare lambda function which will run in spawned threads. Call C API to <strong>invoke cached op</strong> within the lambda function.</li>
<li>Spawn multiple threads and wait for all threads to complete.</li>
<li>Post process data to obtain inference results and cleanup.</li>
</ol>
<h3 id="step-1-parse-arguments-and-load-input-image-into-ndarray">Step 1: Parse arguments and load input image into ndarray</h3>
<p><a href="multi_threaded_inference.cc#L299-L341">https://github.com/apache/mxnet/example/multi_threaded_inference/multi_threaded_inference.cc#L299-L341</a></p>
<p>The above code parses the arguments and loads the image file into an ndarray with a specific shape. A few things are set by default and are not configurable; for example, <code class="highlighter-rouge">static_alloc</code> and <code class="highlighter-rouge">static_shape</code> are set to true by default.</p>
<h3 id="step-2-prepare-input-data-and-load-parameters-copying-data-to-a-specific-context">Step 2: Prepare input data and load parameters, copying data to a specific context</h3>
<p><a href="multi_threaded_inference.cc#L147-L205">https://github.com/apache/mxnet/example/multi_threaded_inference/multi_threaded_inference.cc#L147-L205</a></p>
<p>The above code loads the parameters and copies the input data and parameters to a specific context.</p>
<h3 id="step-3-preparing-arguments-to-pass-to-the-cachedop-and-calling-c-api-to-create-cached-op">Step 3: Preparing arguments to pass to the CachedOp and calling C API to create cached op</h3>
<p><a href="multi_threaded_inference.cc#L207-233">https://github.com/apache/mxnet/example/multi_threaded_inference/multi_threaded_inference.cc#L207-L233</a></p>
<p>The above code prepares <code class="highlighter-rouge">flag_key_cstrs</code> and <code class="highlighter-rouge">flag_val_cstrs</code> to be passed to the cached op.
The C API call is made with <code class="highlighter-rouge">MXCreateCachedOp</code>. This creates the thread-safe cached
op, since <code class="highlighter-rouge">thread_safe</code> (the last parameter to <code class="highlighter-rouge">MXCreateCachedOp</code>) is set to
true. When it is set to false, the regular CachedOp is created instead of CachedOpThreadSafe.</p>
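<p>A minimal sketch of this step is shown below (not the exact example code). It assumes a <code class="highlighter-rouge">SymbolHandle</code> already loaded from the model JSON (for example via <code class="highlighter-rouge">MXSymbolCreateFromFile</code>); the helper name <code class="highlighter-rouge">CreateThreadSafeCachedOp</code> is hypothetical, and only the <code class="highlighter-rouge">static_alloc</code> and <code class="highlighter-rouge">static_shape</code> flags mentioned earlier are passed:</p>
<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>#include &lt;mxnet/c_api.h&gt;
#include &lt;iostream&gt;
#include &lt;string&gt;
#include &lt;vector&gt;

// Hypothetical helper: creates a thread safe cached op from an already-loaded symbol
// (e.g. a SymbolHandle obtained from MXSymbolCreateFromFile on the model JSON).
CachedOpHandle CreateThreadSafeCachedOp(SymbolHandle sym_handle) {
  // The example keeps static_alloc and static_shape enabled by default.
  std::vector&lt;std::string&gt; flag_keys{"static_alloc", "static_shape"};
  std::vector&lt;std::string&gt; flag_vals{"true", "true"};
  std::vector&lt;const char*&gt; flag_key_cstrs, flag_val_cstrs;
  for (const auto &amp;k : flag_keys) flag_key_cstrs.push_back(k.c_str());
  for (const auto &amp;v : flag_vals) flag_val_cstrs.push_back(v.c_str());

  CachedOpHandle hdl = nullptr;
  // thread_safe=true (the last argument) selects CachedOpThreadSafe instead of CachedOp.
  if (MXCreateCachedOp(sym_handle, static_cast&lt;int&gt;(flag_keys.size()),
                       flag_key_cstrs.data(), flag_val_cstrs.data(),
                       &amp;hdl, true) != 0) {
    std::cerr &lt;&lt; MXGetLastError() &lt;&lt; std::endl;
  }
  return hdl;
}
</code></pre></div></div>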
<h3 id="step-4-prepare-lambda-function-which-will-run-in-spawned-threads">Step 4: Prepare lambda function which will run in spawned threads</h3>
<p><a href="multi_threaded_inference.cc#L248-262">https://github.com/apache/mxnet/example/multi_threaded_inference/multi_threaded_inference.cc#L248-L262</a></p>
<p>The above code creates a lambda function that takes the thread number as its argument.
If <code class="highlighter-rouge">random_sleep</code> is set, each thread sleeps for a random duration between 0 and 5 seconds.
It then invokes <code class="highlighter-rouge">MXInvokeCachedOp</code>; based on the handle, the call determines whether to invoke the thread-safe version of the cached op or not.
If the handle was created with <code class="highlighter-rouge">thread_safe</code> set to false, CachedOp is invoked instead of CachedOpThreadSafe.</p>
<h3 id="step-5-spawn-multiple-threads-and-wait-for-all-threads-to-complete">Step 5: Spawn multiple threads and wait for all threads to complete</h3>
<p><a href="multi_threaded_inference.cc#L264-L276">https://github.com/anirudh2290/apache/mxnet/example/multi_threaded_inference/multi_threaded_inference.cc#L264-L276</a></p>
<p>The above code spawns multiple threads, joins them, and waits for all ops to complete.
An alternative is to wait on the output ndarray inside each thread and remove the WaitAll after the join.</p>
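<p>The sketch below illustrates the threading pattern of steps 4 and 5 only; the worker body is reduced to a hypothetical <code class="highlighter-rouge">RunInferenceOnThread</code> function that stands in for the per-thread <code class="highlighter-rouge">MXInvokeCachedOp</code> call made in the example:</p>
<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>#include &lt;chrono&gt;
#include &lt;cstdlib&gt;
#include &lt;thread&gt;
#include &lt;vector&gt;

// Hypothetical per-thread worker: in the real example this is a lambda that
// calls MXInvokeCachedOp on the shared cached op handle for its own input.
void RunInferenceOnThread(int thread_id) {
  (void)thread_id;  // would select this thread's input/output slots in the real example
  // Optional random sleep of 0-5 seconds to interleave the threads,
  // mirroring the random_sleep flag.
  std::this_thread::sleep_for(std::chrono::seconds(std::rand() % 6));
}

int main() {
  const int num_threads = 2;  // two worker threads, matching the example invocation above
  std::vector&lt;std::thread&gt; threads;
  threads.reserve(num_threads);
  for (int i = 0; i &lt; num_threads; ++i) {
    threads.emplace_back(RunInferenceOnThread, i);
  }
  for (auto &amp;t : threads) {
    t.join();  // wait for every worker before post-processing the outputs
  }
  // The example additionally calls MXNDArrayWaitAll() here so that all
  // asynchronous work is finished before the outputs are read.
  return 0;
}
</code></pre></div></div>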
<h3 id="step-6-post-process-data-to-obtain-inference-results-and-cleanup">Step 6: Post process data to obtain inference results and cleanup</h3>
<p><a href="multi_threaded_inference.cc#L286-293">https://github.com/apache/mxnet/example/multi_threaded_inference/multi_threaded_inference.cc#L286-L293</a></p>
<p>The above code prints the results for the different threads and cleans up the thread-safe cached op.</p>
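<p>Cleanup amounts to freeing the shared handle once all threads have joined. A minimal sketch, assuming the handle created in step 3 (the helper name <code class="highlighter-rouge">ReleaseCachedOp</code> is hypothetical):</p>
<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>#include &lt;mxnet/c_api.h&gt;
#include &lt;iostream&gt;

// Hypothetical helper: release the thread safe cached op handle once all
// worker threads have joined. MXFreeCachedOp is the C API counterpart of
// the MXCreateCachedOp call made in step 3.
void ReleaseCachedOp(CachedOpHandle hdl) {
  if (MXFreeCachedOp(hdl) != 0) {
    std::cerr &lt;&lt; MXGetLastError() &lt;&lt; std::endl;
  }
}
</code></pre></div></div>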
<h2 id="current-limitations">Current Limitations</h2>
<ol>
<li>Only operators tested with the existing model coverage are supported. Other operators and operator types (stateful operators, custom operators) are not supported. Existing model coverage is as follows (this list will keep growing as we test more models with different model types):</li>
</ol>
<table>
<thead>
<tr>
<th>Models Tested</th>
<th>oneDNN</th>
<th>CUDNN</th>
<th>NO-CUDNN</th>
</tr>
</thead>
<tbody>
<tr>
<td>imagenet1k-resnet-18</td>
<td>Yes</td>
<td>Yes</td>
<td>Yes</td>
</tr>
<tr>
<td>imagenet1k-resnet-152</td>
<td>Yes</td>
<td>Yes</td>
<td>Yes</td>
</tr>
<tr>
<td>imagenet1k-resnet-50</td>
<td>Yes</td>
<td>Yes</td>
<td>Yes</td>
</tr>
</tbody>
</table>
<ol start="2">
<li>Only dense storage types are supported currently.</li>
<li>Multi-GPU inference is not currently supported.</li>
<li>Instantiating multiple instances of SymbolBlockThreadSafe is not supported; parallel inference can be run on only one model per process.</li>
<li>Dynamic shapes are not supported by the thread-safe cached op.</li>
<li>Bulking of ops is not supported.</li>
<li>Only inference use cases are supported currently; training use cases are not supported.</li>
<li>Graph rewrites with the subgraph API are not currently supported.</li>
<li>There is currently no frontend API support for running multi-threaded inference. As of today, users can combine MXCreateCachedOp and MXInvokeCachedOp with
the CPP frontend to run multi-threaded inference.</li>
<li>Multi-threaded inference with the threaded engine through the Module/Symbolic API and the C Predict API is not currently supported.</li>
<li>Exceptions thrown by <code class="highlighter-rouge">wait_to_read</code> in individual threads can cause issues. Calling invoke from each thread and calling WaitAll after the threads join should still work fine.</li>
<li>Tested only on environments supported by CI. This means that macOS is not supported.</li>
</ol>
<h2 id="future-work">Future Work</h2>
<p>Future work includes increasing model coverage and addressing most of the limitations mentioned under Current Limitations, except for the training use case.
For more updates, please subscribe to the discussion activity on the RFC: <a href="https://github.com/apache/mxnet/issues/16431">https://github.com/apache/mxnet/issues/16431</a>.</p>
</div>
</div>
</div>
</div>
</article>
</main><footer class="site-footer h-card">
<div class="wrapper">
<div class="row">
<div class="col-4">
<h4 class="footer-category-title">Resources</h4>
<ul class="contact-list">
<li><a href="/versions/master/community#stay-connected">Mailing lists</a></li>
<li><a href="/versions/master/community#github-issues">Github Issues</a></li>
<li><a href="https://github.com/apache/mxnet/projects">Projects</a></li>
<li><a href="https://cwiki.apache.org/confluence/display/MXNET/Apache+MXNet+Home">Developer Wiki</a></li>
<li><a href="https://discuss.mxnet.io">Forum</a></li>
<li><a href="/versions/master/community">Contribute To MXNet</a></li>
</ul>
</div>
<div class="col-4"><ul class="social-media-list"><li><a href="https://github.com/apache/mxnet"><svg class="svg-icon"><use xlink:href="/versions/master/assets/minima-social-icons.svg#github"></use></svg> <span class="username">apache/mxnet</span></a></li><li><a href="https://www.twitter.com/apachemxnet"><svg class="svg-icon"><use xlink:href="/versions/master/assets/minima-social-icons.svg#twitter"></use></svg> <span class="username">apachemxnet</span></a></li><li><a href="https://youtube.com/apachemxnet"><svg class="svg-icon"><use xlink:href="/versions/master/assets/minima-social-icons.svg#youtube"></use></svg> <span class="username">apachemxnet</span></a></li></ul>
</div>
<div class="col-4 footer-text">
<p>A flexible and efficient library for deep learning.</p>
</div>
</div>
</div>
</footer>
<footer class="site-footer2">
<div class="wrapper">
<div class="row">
<div class="col-3">
<img src="/versions/master/assets/img/asf_logo.svg" class="footer-logo col-2">
</div>
<div class="footer-bottom-warning col-9">
</p><p>"Copyright © 2017-2022, The Apache Software Foundation. Licensed under the Apache License, Version 2.0. Apache MXNet, MXNet, Apache, the Apache
feather, and the Apache MXNet project logo are either registered trademarks or trademarks of the
Apache Software Foundation."</p>
</div>
</div>
</div>
</footer>
</body>
</html>