<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8"/>
<meta content="IE=edge" http-equiv="X-UA-Compatible"/>
<meta content="width=device-width, initial-scale=1" name="viewport"/>
<title>Gluon Model Zoo — mxnet documentation</title>
<link crossorigin="anonymous" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css" integrity="sha384-1q8mTJOASx8j1Au+a5WDVnPi2lkFfwwEAa8hDDdjZlpLegxhjVME1fgjWPGmkzs7" rel="stylesheet"/>
<link href="https://maxcdn.bootstrapcdn.com/font-awesome/4.5.0/css/font-awesome.min.css" rel="stylesheet"/>
<link href="../../../_static/basic.css" rel="stylesheet" type="text/css">
<link href="../../../_static/pygments.css" rel="stylesheet" type="text/css">
<link href="../../../_static/mxnet.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '../../../',
VERSION: '',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true,
SOURCELINK_SUFFIX: ''
};
</script>
<script src="https://code.jquery.com/jquery-1.11.1.min.js" type="text/javascript"></script>
<script src="../../../_static/underscore.js" type="text/javascript"></script>
<script src="../../../_static/searchtools_custom.js" type="text/javascript"></script>
<script src="../../../_static/doctools.js" type="text/javascript"></script>
<script src="../../../_static/selectlang.js" type="text/javascript"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js?config=TeX-AMS-MML_HTMLorMML" type="text/javascript"></script>
<script type="text/javascript"> jQuery(function() { Search.loadIndex("/searchindex.js"); Search.init();}); </script>
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new
Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-96378503-1', 'auto');
ga('send', 'pageview');
</script>
<!-- -->
<!-- <script type="text/javascript" src="../../../_static/jquery.js"></script> -->
<!-- -->
<!-- <script type="text/javascript" src="../../../_static/underscore.js"></script> -->
<!-- -->
<!-- <script type="text/javascript" src="../../../_static/doctools.js"></script> -->
<!-- -->
<!-- <script type="text/javascript" src="https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML"></script> -->
<!-- -->
<link href="gluon.html" rel="up" title="Gluon Package">
<link href="contrib.html" rel="next" title="Gluon Contrib API"/>
<link href="data.html" rel="prev" title="Gluon Data API"/>
<link href="https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/image/mxnet-icon.png" rel="icon" type="image/png"/>
</head>
<body background="https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/image/mxnet-background-compressed.jpeg" role="document">
<div class="content-block"><div class="navbar navbar-fixed-top">
<div class="container" id="navContainer">
<div class="innder" id="header-inner">
<h1 id="logo-wrap">
<a href="../../../" id="logo"><img src="https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/image/mxnet_logo.png"/></a>
</h1>
<nav class="nav-bar" id="main-nav">
<a class="main-nav-link" href="../../../install/index.html">Install</a>
<span id="dropdown-menu-position-anchor">
<a aria-expanded="true" aria-haspopup="true" class="main-nav-link dropdown-toggle" data-toggle="dropdown" href="#" role="button">Gluon <span class="caret"></span></a>
<ul class="dropdown-menu navbar-menu" id="package-dropdown-menu">
<li><a class="main-nav-link" href="../../../gluon/index.html">About</a></li>
<li><a class="main-nav-link" href="http://gluon.mxnet.io">Tutorials</a></li>
</ul>
</span>
<span id="dropdown-menu-position-anchor">
<a aria-expanded="true" aria-haspopup="true" class="main-nav-link dropdown-toggle" data-toggle="dropdown" href="#" role="button">API <span class="caret"></span></a>
<ul class="dropdown-menu navbar-menu" id="package-dropdown-menu">
<li><a class="main-nav-link" href="../../../api/python/index.html">Python</a></li>
<li><a class="main-nav-link" href="../../../api/scala/index.html">Scala</a></li>
<li><a class="main-nav-link" href="../../../api/r/index.html">R</a></li>
<li><a class="main-nav-link" href="../../../api/julia/index.html">Julia</a></li>
<li><a class="main-nav-link" href="../../../api/c++/index.html">C++</a></li>
<li><a class="main-nav-link" href="../../../api/perl/index.html">Perl</a></li>
</ul>
</span>
<span id="dropdown-menu-position-anchor-docs">
<a aria-expanded="true" aria-haspopup="true" class="main-nav-link dropdown-toggle" data-toggle="dropdown" href="#" role="button">Docs <span class="caret"></span></a>
<ul class="dropdown-menu navbar-menu" id="package-dropdown-menu-docs">
<li><a class="main-nav-link" href="../../../tutorials/index.html">Tutorials</a>
<li><a class="main-nav-link" href="../../../faq/index.html">FAQ</a></li>
<li><a class="main-nav-link" href="../../../architecture/index.html">Architecture</a></li>
<li><a class="main-nav-link" href="https://github.com/apache/incubator-mxnet/tree/master/example">Examples</a></li>
<li><a class="main-nav-link" href="../../../model_zoo/index.html">Model Zoo</a></li>
</ul>
</span>
<a class="main-nav-link" href="https://github.com/dmlc/mxnet">Github</a>
<span id="dropdown-menu-position-anchor-community">
<a aria-expanded="true" aria-haspopup="true" class="main-nav-link dropdown-toggle" data-toggle="dropdown" href="#" role="button">Community <span class="caret"></span></a>
<ul class="dropdown-menu navbar-menu" id="package-dropdown-menu-community">
<li><a class="main-nav-link" href="../../../community/index.html">Community</a></li>
<li><a class="main-nav-link" href="../../../community/contribute.html">Contribute</a></li>
<li><a class="main-nav-link" href="../../../community/powered_by.html">Powered By</a></li>
<li><a class="main-nav-link" href="http://discuss.mxnet.io">Discuss</a></li>
</ul>
</span>
<span id="dropdown-menu-position-anchor-version" style="position: relative"><a href="#" class="main-nav-link dropdown-toggle" data-toggle="dropdown" role="button" aria-haspopup="true" aria-expanded="true">Versions(master)<span class="caret"></span></a><ul id="package-dropdown-menu" class="dropdown-menu"><li><a class="main-nav-link" href=https://mxnet.incubator.apache.org/>1.0.0</a></li><li><a class="main-nav-link" href=https://mxnet.incubator.apache.org/versions/0.12.1/index.html>0.12.1</a></li><li><a class="main-nav-link" href=https://mxnet.incubator.apache.org/versions/0.12.0/index.html>0.12.0</a></li><li><a class="main-nav-link" href=https://mxnet.incubator.apache.org/versions/0.11.0/index.html>0.11.0</a></li><li><a class="main-nav-link" href=https://mxnet.incubator.apache.org/versions/master/index.html>master</a></li></ul></span></nav>
<script> function getRootPath(){ return "../../../" } </script>
<div class="burgerIcon dropdown">
<a class="dropdown-toggle" data-toggle="dropdown" href="#" role="button"></a>
<ul class="dropdown-menu" id="burgerMenu">
<li><a href="../../../install/index.html">Install</a></li>
<li><a class="main-nav-link" href="../../../tutorials/index.html">Tutorials</a></li>
<li class="dropdown-submenu">
<a href="#" tabindex="-1">Community</a>
<ul class="dropdown-menu">
<li><a href="../../../community/index.html" tabindex="-1">Community</a></li>
<li><a href="../../../community/contribute.html" tabindex="-1">Contribute</a></li>
<li><a href="../../../community/powered_by.html" tabindex="-1">Powered By</a></li>
</ul>
</li>
<li class="dropdown-submenu">
<a href="#" tabindex="-1">API</a>
<ul class="dropdown-menu">
<li><a href="../../../api/python/index.html" tabindex="-1">Python</a>
</li>
<li><a href="../../../api/scala/index.html" tabindex="-1">Scala</a>
</li>
<li><a href="../../../api/r/index.html" tabindex="-1">R</a>
</li>
<li><a href="../../../api/julia/index.html" tabindex="-1">Julia</a>
</li>
<li><a href="../../../api/c++/index.html" tabindex="-1">C++</a>
</li>
<li><a href="../../../api/perl/index.html" tabindex="-1">Perl</a>
</li>
</ul>
</li>
<li class="dropdown-submenu">
<a href="#" tabindex="-1">Docs</a>
<ul class="dropdown-menu">
<li><a href="../../../tutorials/index.html" tabindex="-1">Tutorials</a></li>
<li><a href="../../../faq/index.html" tabindex="-1">FAQ</a></li>
<li><a href="../../../architecture/index.html" tabindex="-1">Architecture</a></li>
<li><a href="https://github.com/apache/incubator-mxnet/tree/master/example" tabindex="-1">Examples</a></li>
<li><a href="../../../model_zoo/index.html" tabindex="-1">Model Zoo</a></li>
</ul>
</li>
<li><a href="../../../architecture/index.html">Architecture</a></li>
<li><a class="main-nav-link" href="https://github.com/dmlc/mxnet">Github</a></li>
<li id="dropdown-menu-position-anchor-version-mobile" class="dropdown-submenu" style="position: relative"><a href="#" tabindex="-1">Versions(master)</a><ul class="dropdown-menu"><li><a tabindex="-1" href=https://mxnet.incubator.apache.org/>1.0.0</a></li><li><a tabindex="-1" href=https://mxnet.incubator.apache.org/versions/0.12.1/index.html>0.12.1</a></li><li><a tabindex="-1" href=https://mxnet.incubator.apache.org/versions/0.12.0/index.html>0.12.0</a></li><li><a tabindex="-1" href=https://mxnet.incubator.apache.org/versions/0.11.0/index.html>0.11.0</a></li><li><a tabindex="-1" href=https://mxnet.incubator.apache.org/versions/master/index.html>master</a></li></ul></li></ul>
</div>
<div class="plusIcon dropdown">
<a class="dropdown-toggle" data-toggle="dropdown" href="#" role="button"><span aria-hidden="true" class="glyphicon glyphicon-plus"></span></a>
<ul class="dropdown-menu dropdown-menu-right" id="plusMenu"></ul>
</div>
<div id="search-input-wrap">
<form action="../../../search.html" autocomplete="off" class="" method="get" role="search">
<div class="form-group inner-addon left-addon">
<i class="glyphicon glyphicon-search"></i>
<input class="form-control" name="q" placeholder="Search" type="text"/>
</div>
<input name="check_keywords" type="hidden" value="yes">
<input name="area" type="hidden" value="default"/>
</form>
<div id="search-preview"></div>
</div>
<div id="searchIcon">
<span aria-hidden="true" class="glyphicon glyphicon-search"></span>
</div>
<!-- <div id="lang-select-wrap"> -->
<!-- <label id="lang-select-label"> -->
<!-- <\!-- <i class="fa fa-globe"></i> -\-> -->
<!-- <span></span> -->
<!-- </label> -->
<!-- <select id="lang-select"> -->
<!-- <option value="en">Eng</option> -->
<!-- <option value="zh">中文</option> -->
<!-- </select> -->
<!-- </div> -->
<!-- <a id="mobile-nav-toggle">
<span class="mobile-nav-toggle-bar"></span>
<span class="mobile-nav-toggle-bar"></span>
<span class="mobile-nav-toggle-bar"></span>
</a> -->
</div>
</div>
</div>
<script type="text/javascript">
$('body').css('background', 'white');
</script>
<div class="container">
<div class="row">
<div aria-label="main navigation" class="sphinxsidebar leftsidebar" role="navigation">
<div class="sphinxsidebarwrapper">
<ul class="current">
<li class="toctree-l1 current"><a class="reference internal" href="../index.html">Python Documents</a><ul class="current">
<li class="toctree-l2"><a class="reference internal" href="../index.html#ndarray-api">NDArray API</a></li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#symbol-api">Symbol API</a></li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#module-api">Module API</a></li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#autograd-api">Autograd API</a></li>
<li class="toctree-l2 current"><a class="reference internal" href="../index.html#gluon-api">Gluon API</a><ul class="current">
<li class="toctree-l3 current"><a class="reference internal" href="gluon.html">Gluon Package</a><ul class="current">
<li class="toctree-l4 current"><a class="reference internal" href="gluon.html#overview">Overview</a></li>
<li class="toctree-l4"><a class="reference internal" href="gluon.html#parameter">Parameter</a></li>
<li class="toctree-l4"><a class="reference internal" href="gluon.html#containers">Containers</a></li>
<li class="toctree-l4"><a class="reference internal" href="gluon.html#trainer">Trainer</a></li>
<li class="toctree-l4"><a class="reference internal" href="gluon.html#utilities">Utilities</a></li>
<li class="toctree-l4"><a class="reference internal" href="gluon.html#api-reference">API Reference</a></li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="nn.html">Gluon Neural Network Layers</a></li>
<li class="toctree-l3"><a class="reference internal" href="rnn.html">Gluon Recurrent Neural Network API</a></li>
<li class="toctree-l3"><a class="reference internal" href="loss.html">Gluon Loss API</a></li>
<li class="toctree-l3"><a class="reference internal" href="data.html">Gluon Data API</a></li>
<li class="toctree-l3 current"><a class="current reference internal" href="">Gluon Model Zoo</a><ul>
<li class="toctree-l4"><a class="reference internal" href="#overview">Overview</a></li>
<li class="toctree-l4"><a class="reference internal" href="#api-reference">API Reference</a></li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="contrib.html">Gluon Contrib API</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#kvstore-api">KVStore API</a></li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#io-api">IO API</a></li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#image-api">Image API</a></li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#optimization-api">Optimization API</a></li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#callback-api">Callback API</a></li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#metric-api">Metric API</a></li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#run-time-compilation-api">Run-Time Compilation API</a></li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#contrib-package">Contrib Package</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../../r/index.html">R Documents</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../julia/index.html">Julia Documents</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../c++/index.html">C++ Documents</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../scala/index.html">Scala Documents</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../perl/index.html">Perl Documents</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../faq/index.html">HowTo Documents</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../architecture/index.html">System Documents</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../tutorials/index.html">Tutorials</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../community/index.html">Community</a></li>
</ul>
</div>
</div>
<div class="content">
<div class="page-tracker"></div>
<div class="section" id="gluon-model-zoo">
<span id="gluon-model-zoo"></span><h1>Gluon Model Zoo<a class="headerlink" href="#gluon-model-zoo" title="Permalink to this headline"></a></h1>
<div class="section" id="overview">
<span id="overview"></span><h2>Overview<a class="headerlink" href="#overview" title="Permalink to this headline"></a></h2>
<p>This document lists the model APIs in Gluon:</p>
<table border="1" class="longtable docutils">
<colgroup>
<col width="10%"/>
<col width="90%"/>
</colgroup>
<tbody valign="top">
<tr class="row-odd"><td><a class="reference internal" href="#module-mxnet.gluon.model_zoo" title="mxnet.gluon.model_zoo"><code class="xref py py-obj docutils literal"><span class="pre">mxnet.gluon.model_zoo</span></code></a></td>
<td>Predefined and pretrained models.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#module-mxnet.gluon.model_zoo.vision" title="mxnet.gluon.model_zoo.vision"><code class="xref py py-obj docutils literal"><span class="pre">mxnet.gluon.model_zoo.vision</span></code></a></td>
<td>Module for pre-defined neural network models.</td>
</tr>
</tbody>
</table>
<p>The <code class="docutils literal"><span class="pre">Gluon</span> <span class="pre">Model</span> <span class="pre">Zoo</span></code> API, defined in the <code class="docutils literal"><span class="pre">gluon.model_zoo</span></code> package, provides pre-defined
and pre-trained models to help bootstrap machine learning applications.</p>
<p>In the rest of this document, we list routines provided by the <code class="docutils literal"><span class="pre">gluon.model_zoo</span></code> package.</p>
<div class="section" id="module-mxnet.gluon.model_zoo.vision">
<span id="vision"></span><span id="vision"></span><h3>Vision<a class="headerlink" href="#module-mxnet.gluon.model_zoo.vision" title="Permalink to this headline"></a></h3>
<p>Module for pre-defined neural network models.</p>
<p>This module contains definitions for the following model architectures:
- <a class="reference external" href="https://arxiv.org/abs/1404.5997">AlexNet</a>
- <a class="reference external" href="https://arxiv.org/abs/1608.06993">DenseNet</a>
- <a class="reference external" href="http://arxiv.org/abs/1512.00567">Inception V3</a>
- <a class="reference external" href="https://arxiv.org/abs/1512.03385">ResNet V1</a>
- <a class="reference external" href="https://arxiv.org/abs/1512.03385">ResNet V2</a>
- <a class="reference external" href="https://arxiv.org/abs/1602.07360">SqueezeNet</a>
- <a class="reference external" href="https://arxiv.org/abs/1409.1556">VGG</a>
- <a class="reference external" href="https://arxiv.org/abs/1704.04861">MobileNet</a></p>
<p>You can construct a model with random weights by calling its constructor:</p>
<div class="code highlight-python"><div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">mxnet.gluon.model_zoo</span> <span class="kn">import</span> <span class="n">vision</span>
<span class="n">resnet18</span> <span class="o">=</span> <span class="n">vision</span><span class="o">.</span><span class="n">resnet18_v1</span><span class="p">()</span>
<span class="n">alexnet</span> <span class="o">=</span> <span class="n">vision</span><span class="o">.</span><span class="n">alexnet</span><span class="p">()</span>
<span class="n">squeezenet</span> <span class="o">=</span> <span class="n">vision</span><span class="o">.</span><span class="n">squeezenet1_0</span><span class="p">()</span>
<span class="n">densenet</span> <span class="o">=</span> <span class="n">vision</span><span class="o">.</span><span class="n">densenet_161</span><span class="p">()</span>
</pre></div>
</div>
<p>We provide pre-trained weights for all of the listed models.
These models can be constructed by passing <code class="docutils literal"><span class="pre">pretrained=True</span></code>:</p>
<div class="code highlight-python"><div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">mxnet.gluon.model_zoo</span> <span class="kn">import</span> <span class="n">vision</span>
<span class="n">resnet18</span> <span class="o">=</span> <span class="n">vision</span><span class="o">.</span><span class="n">resnet18_v1</span><span class="p">(</span><span class="n">pretrained</span><span class="o">=</span><span class="bp">True</span><span class="p">)</span>
<span class="n">alexnet</span> <span class="o">=</span> <span class="n">vision</span><span class="o">.</span><span class="n">alexnet</span><span class="p">(</span><span class="n">pretrained</span><span class="o">=</span><span class="bp">True</span><span class="p">)</span>
</pre></div>
</div>
<p>All pre-trained models expect input images normalized in the same way,
i.e. mini-batches of 3-channel RGB images of shape (N x 3 x H x W),
where N is the batch size, and H and W are expected to be at least 224.
The images have to be loaded into a range of [0, 1] and then normalized
using <code class="docutils literal"><span class="pre">mean</span> <span class="pre">=</span> <span class="pre">[0.485,</span> <span class="pre">0.456,</span> <span class="pre">0.406]</span></code> and <code class="docutils literal"><span class="pre">std</span> <span class="pre">=</span> <span class="pre">[0.229,</span> <span class="pre">0.224,</span> <span class="pre">0.225]</span></code>.
This transformation should preferably happen during preprocessing. You can use
<code class="docutils literal"><span class="pre">mx.image.color_normalize</span></code> for this transformation:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">image</span> <span class="o">=</span> <span class="n">image</span><span class="o">/</span><span class="mi">255</span>
<span class="n">normalized</span> <span class="o">=</span> <span class="n">mx</span><span class="o">.</span><span class="n">image</span><span class="o">.</span><span class="n">color_normalize</span><span class="p">(</span><span class="n">image</span><span class="p">,</span>
<span class="n">mean</span><span class="o">=</span><span class="n">mx</span><span class="o">.</span><span class="n">nd</span><span class="o">.</span><span class="n">array</span><span class="p">([</span><span class="mf">0.485</span><span class="p">,</span> <span class="mf">0.456</span><span class="p">,</span> <span class="mf">0.406</span><span class="p">]),</span>
<span class="n">std</span><span class="o">=</span><span class="n">mx</span><span class="o">.</span><span class="n">nd</span><span class="o">.</span><span class="n">array</span><span class="p">([</span><span class="mf">0.229</span><span class="p">,</span> <span class="mf">0.224</span><span class="p">,</span> <span class="mf">0.225</span><span class="p">]))</span>
</pre></div>
</div>
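<p>Putting these pieces together, the sketch below shows one way to run a pretrained model on a single image. It is illustrative only: it assumes the image has already been loaded as an HWC <code class="docutils literal"><span class="pre">mx.nd.NDArray</span></code> (e.g. via <code class="docutils literal"><span class="pre">mx.image.imread</span></code>), and the resizing, layout and batching steps are not part of the model zoo API itself:</p>
<div class="highlight-python"><div class="highlight"><pre>import mxnet as mx
from mxnet.gluon.model_zoo import vision

net = vision.resnet18_v1(pretrained=True)

# `image` is an HWC uint8 NDArray, e.g. from mx.image.imread('cat.jpg')
image = mx.image.imresize(image, 224, 224)               # models expect H, W of at least 224
image = image.astype('float32') / 255                    # scale to the [0, 1] range
image = mx.image.color_normalize(image,
                                 mean=mx.nd.array([0.485, 0.456, 0.406]),
                                 std=mx.nd.array([0.229, 0.224, 0.225]))
image = image.transpose((2, 0, 1)).expand_dims(axis=0)   # HWC to NCHW, add batch dimension

prob = net(image).softmax()                              # class probabilities
top_class = int(prob.argmax(axis=1).asscalar())
</pre></div>
</div>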
<p>The following table summarizes the available models.</p>
<table border="1" class="docutils">
<colgroup>
<col width="17%"/>
<col width="17%"/>
<col width="17%"/>
<col width="17%"/>
<col width="17%"/>
<col width="17%"/>
</colgroup>
<thead valign="bottom">
<tr class="row-odd"><th class="head">Alias</th>
<th class="head">Network</th>
<th class="head"># Parameters</th>
<th class="head">Top-1 Accuracy</th>
<th class="head">Top-5 Accuracy</th>
<th class="head">Origin</th>
</tr>
</thead>
<tbody valign="top">
<tr class="row-even"><td>alexnet</td>
<td><a class="reference external" href="https://arxiv.org/abs/1404.5997">AlexNet</a></td>
<td>61,100,840</td>
<td>0.5492</td>
<td>0.7803</td>
<td>Converted from pytorch vision</td>
</tr>
<tr class="row-odd"><td>densenet121</td>
<td><a class="reference external" href="https://arxiv.org/pdf/1608.06993.pdf">DenseNet-121</a></td>
<td>8,062,504</td>
<td>0.7497</td>
<td>0.9225</td>
<td>Converted from pytorch vision</td>
</tr>
<tr class="row-even"><td>densenet161</td>
<td><a class="reference external" href="https://arxiv.org/pdf/1608.06993.pdf">DenseNet-161</a></td>
<td>28,900,936</td>
<td>0.7770</td>
<td>0.9380</td>
<td>Converted from pytorch vision</td>
</tr>
<tr class="row-odd"><td>densenet169</td>
<td><a class="reference external" href="https://arxiv.org/pdf/1608.06993.pdf">DenseNet-169</a></td>
<td>14,307,880</td>
<td>0.7617</td>
<td>0.9317</td>
<td>Converted from pytorch vision</td>
</tr>
<tr class="row-even"><td>densenet201</td>
<td><a class="reference external" href="https://arxiv.org/pdf/1608.06993.pdf">DenseNet-201</a></td>
<td>20,242,984</td>
<td>0.7732</td>
<td>0.9362</td>
<td>Converted from pytorch vision</td>
</tr>
<tr class="row-odd"><td>inceptionv3</td>
<td><a class="reference external" href="http://arxiv.org/abs/1512.00567">Inception V3 299x299</a></td>
<td>23,869,000</td>
<td>0.7755</td>
<td>0.9364</td>
<td>Converted from pytorch vision</td>
</tr>
<tr class="row-even"><td>mobilenet0.25</td>
<td><a class="reference external" href="https://arxiv.org/abs/1704.04861">MobileNet 0.25</a></td>
<td>475,544</td>
<td>0.5185</td>
<td>0.7608</td>
<td>Trained with <a class="reference external" href="https://github.com/zhreshold/mxnet/blob/2fbfdbcbacff8b738bd9f44e9c8cefc84d6dfbb5/example/gluon/train_imagenet.py">script</a></td>
</tr>
<tr class="row-odd"><td>mobilenet0.5</td>
<td><a class="reference external" href="https://arxiv.org/abs/1704.04861">MobileNet 0.5</a></td>
<td>1,342,536</td>
<td>0.6307</td>
<td>0.8475</td>
<td>Trained with <a class="reference external" href="https://github.com/zhreshold/mxnet/blob/2fbfdbcbacff8b738bd9f44e9c8cefc84d6dfbb5/example/gluon/train_imagenet.py">script</a></td>
</tr>
<tr class="row-even"><td>mobilenet0.75</td>
<td><a class="reference external" href="https://arxiv.org/abs/1704.04861">MobileNet 0.75</a></td>
<td>2,601,976</td>
<td>0.6738</td>
<td>0.8782</td>
<td>Trained with <a class="reference external" href="https://github.com/zhreshold/mxnet/blob/2fbfdbcbacff8b738bd9f44e9c8cefc84d6dfbb5/example/gluon/train_imagenet.py">script</a></td>
</tr>
<tr class="row-odd"><td>mobilenet1.0</td>
<td><a class="reference external" href="https://arxiv.org/abs/1704.04861">MobileNet 1.0</a></td>
<td>4,253,864</td>
<td>0.7105</td>
<td>0.9006</td>
<td>Trained with <a class="reference external" href="https://github.com/zhreshold/mxnet/blob/2fbfdbcbacff8b738bd9f44e9c8cefc84d6dfbb5/example/gluon/train_imagenet.py">script</a></td>
</tr>
<tr class="row-even"><td>resnet18_v1</td>
<td><a class="reference external" href="http://arxiv.org/abs/1512.03385">ResNet-18 V1</a></td>
<td>11,699,112</td>
<td>0.6803</td>
<td>0.8818</td>
<td>Converted from pytorch vision</td>
</tr>
<tr class="row-odd"><td>resnet34_v1</td>
<td><a class="reference external" href="http://arxiv.org/abs/1512.03385">ResNet-34 V1</a></td>
<td>21,814,696</td>
<td>0.7202</td>
<td>0.9066</td>
<td>Converted from pytorch vision</td>
</tr>
<tr class="row-even"><td>resnet50_v1</td>
<td><a class="reference external" href="http://arxiv.org/abs/1512.03385">ResNet-50 V1</a></td>
<td>25,629,032</td>
<td>0.7540</td>
<td>0.9266</td>
<td>Trained with <a class="reference external" href="https://github.com/zhreshold/mxnet/blob/2fbfdbcbacff8b738bd9f44e9c8cefc84d6dfbb5/example/gluon/train_imagenet.py">script</a></td>
</tr>
<tr class="row-odd"><td>resnet101_v1</td>
<td><a class="reference external" href="http://arxiv.org/abs/1512.03385">ResNet-101 V1</a></td>
<td>44,695,144</td>
<td>0.7693</td>
<td>0.9334</td>
<td>Trained with <a class="reference external" href="https://github.com/zhreshold/mxnet/blob/2fbfdbcbacff8b738bd9f44e9c8cefc84d6dfbb5/example/gluon/train_imagenet.py">script</a></td>
</tr>
<tr class="row-even"><td>resnet152_v1</td>
<td><a class="reference external" href="http://arxiv.org/abs/1512.03385">ResNet-152 V1</a></td>
<td>60,404,072</td>
<td>0.7727</td>
<td>0.9353</td>
<td>Trained with <a class="reference external" href="https://github.com/zhreshold/mxnet/blob/2fbfdbcbacff8b738bd9f44e9c8cefc84d6dfbb5/example/gluon/train_imagenet.py">script</a></td>
</tr>
<tr class="row-odd"><td>resnet18_v2</td>
<td><a class="reference external" href="https://arxiv.org/abs/1603.05027">ResNet-18 V2</a></td>
<td>11,695,796</td>
<td>0.6961</td>
<td>0.8901</td>
<td>Trained with <a class="reference external" href="https://github.com/apache/incubator-mxnet/blob/4dcd96ae2f6820e01455079d00f49db1cd21eda9/example/gluon/image_classification.py">script</a></td>
</tr>
<tr class="row-even"><td>resnet34_v2</td>
<td><a class="reference external" href="https://arxiv.org/abs/1603.05027">ResNet-34 V2</a></td>
<td>21,811,380</td>
<td>0.7324</td>
<td>0.9125</td>
<td>Trained with <a class="reference external" href="https://github.com/apache/incubator-mxnet/blob/4dcd96ae2f6820e01455079d00f49db1cd21eda9/example/gluon/image_classification.py">script</a></td>
</tr>
<tr class="row-odd"><td>resnet50_v2</td>
<td><a class="reference external" href="https://arxiv.org/abs/1603.05027">ResNet-50 V2</a></td>
<td>25,595,060</td>
<td>0.7622</td>
<td>0.9297</td>
<td>Trained with <a class="reference external" href="https://github.com/zhreshold/mxnet/blob/2fbfdbcbacff8b738bd9f44e9c8cefc84d6dfbb5/example/gluon/train_imagenet.py">script</a></td>
</tr>
<tr class="row-even"><td>resnet101_v2</td>
<td><a class="reference external" href="https://arxiv.org/abs/1603.05027">ResNet-101 V2</a></td>
<td>44,639,412</td>
<td>0.7747</td>
<td>0.9375</td>
<td>Trained with <a class="reference external" href="https://github.com/zhreshold/mxnet/blob/2fbfdbcbacff8b738bd9f44e9c8cefc84d6dfbb5/example/gluon/train_imagenet.py">script</a></td>
</tr>
<tr class="row-odd"><td>resnet152_v2</td>
<td><a class="reference external" href="https://arxiv.org/abs/1603.05027">ResNet-152 V2</a></td>
<td>60,329,140</td>
<td>0.7833</td>
<td>0.9409</td>
<td>Trained with <a class="reference external" href="https://github.com/zhreshold/mxnet/blob/2fbfdbcbacff8b738bd9f44e9c8cefc84d6dfbb5/example/gluon/train_imagenet.py">script</a></td>
</tr>
<tr class="row-even"><td>squeezenet1.0</td>
<td><a class="reference external" href="https://arxiv.org/abs/1602.07360">SqueezeNet 1.0</a></td>
<td>1,248,424</td>
<td>0.5611</td>
<td>0.7909</td>
<td>Converted from pytorch vision</td>
</tr>
<tr class="row-odd"><td>squeezenet1.1</td>
<td><a class="reference external" href="https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1">SqueezeNet 1.1</a></td>
<td>1,235,496</td>
<td>0.5496</td>
<td>0.7817</td>
<td>Converted from pytorch vision</td>
</tr>
<tr class="row-even"><td>vgg11</td>
<td><a class="reference external" href="https://arxiv.org/abs/1409.1556">VGG-11</a></td>
<td>132,863,336</td>
<td>0.6662</td>
<td>0.8734</td>
<td>Converted from pytorch vision</td>
</tr>
<tr class="row-odd"><td>vgg13</td>
<td><a class="reference external" href="https://arxiv.org/abs/1409.1556">VGG-13</a></td>
<td>133,047,848</td>
<td>0.6774</td>
<td>0.8811</td>
<td>Converted from pytorch vision</td>
</tr>
<tr class="row-even"><td>vgg16</td>
<td><a class="reference external" href="https://arxiv.org/abs/1409.1556">VGG-16</a></td>
<td>138,357,544</td>
<td>0.6986</td>
<td>0.8945</td>
<td>Converted from pytorch vision</td>
</tr>
<tr class="row-odd"><td>vgg19</td>
<td><a class="reference external" href="https://arxiv.org/abs/1409.1556">VGG-19</a></td>
<td>143,667,240</td>
<td>0.7072</td>
<td>0.8988</td>
<td>Converted from pytorch vision</td>
</tr>
<tr class="row-even"><td>vgg11_bn</td>
<td><a class="reference external" href="https://arxiv.org/abs/1409.1556">VGG-11 with batch normalization</a></td>
<td>132,874,344</td>
<td>0.6859</td>
<td>0.8872</td>
<td>Converted from pytorch vision</td>
</tr>
<tr class="row-odd"><td>vgg13_bn</td>
<td><a class="reference external" href="https://arxiv.org/abs/1409.1556">VGG-13 with batch normalization</a></td>
<td>133,059,624</td>
<td>0.6884</td>
<td>0.8882</td>
<td>Converted from pytorch vision</td>
</tr>
<tr class="row-even"><td>vgg16_bn</td>
<td><a class="reference external" href="https://arxiv.org/abs/1409.1556">VGG-16 with batch normalization</a></td>
<td>138,374,440</td>
<td>0.7142</td>
<td>0.9043</td>
<td>Converted from pytorch vision</td>
</tr>
<tr class="row-odd"><td>vgg19_bn</td>
<td><a class="reference external" href="https://arxiv.org/abs/1409.1556">VGG-19 with batch normalization</a></td>
<td>143,689,256</td>
<td>0.7241</td>
<td>0.9093</td>
<td>Converted from pytorch vision</td>
</tr>
</tbody>
</table>
<table border="1" class="longtable docutils">
<colgroup>
<col width="10%"/>
<col width="90%"/>
</colgroup>
<tbody valign="top">
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.get_model" title="mxnet.gluon.model_zoo.vision.get_model"><code class="xref py py-obj docutils literal"><span class="pre">get_model</span></code></a></td>
<td>Returns a pre-defined model by name</td>
</tr>
</tbody>
</table>
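<p>For convenience, <code class="docutils literal"><span class="pre">get_model</span></code> lets you look up any entry in the table above by its alias string instead of calling the constructor directly. A minimal sketch (the alias used here is just one example from the table):</p>
<div class="highlight-python"><div class="highlight"><pre>from mxnet.gluon.model_zoo import vision

# fetch a model by its alias; extra keyword arguments such as
# pretrained are forwarded to the underlying constructor
net = vision.get_model('resnet50_v2', pretrained=True)
</pre></div>
</div>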
<div class="section" id="resnet">
<span id="resnet"></span><h4>ResNet<a class="headerlink" href="#resnet" title="Permalink to this headline"></a></h4>
<table border="1" class="longtable docutils">
<colgroup>
<col width="10%"/>
<col width="90%"/>
</colgroup>
<tbody valign="top">
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.resnet18_v1" title="mxnet.gluon.model_zoo.vision.resnet18_v1"><code class="xref py py-obj docutils literal"><span class="pre">resnet18_v1</span></code></a></td>
<td>ResNet-18 V1 model from <a class="reference external" href="http://arxiv.org/abs/1512.03385">“Deep Residual Learning for Image Recognition”</a> paper.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.resnet34_v1" title="mxnet.gluon.model_zoo.vision.resnet34_v1"><code class="xref py py-obj docutils literal"><span class="pre">resnet34_v1</span></code></a></td>
<td>ResNet-34 V1 model from <a class="reference external" href="http://arxiv.org/abs/1512.03385">“Deep Residual Learning for Image Recognition”</a> paper.</td>
</tr>
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.resnet50_v1" title="mxnet.gluon.model_zoo.vision.resnet50_v1"><code class="xref py py-obj docutils literal"><span class="pre">resnet50_v1</span></code></a></td>
<td>ResNet-50 V1 model from <a class="reference external" href="http://arxiv.org/abs/1512.03385">“Deep Residual Learning for Image Recognition”</a> paper.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.resnet101_v1" title="mxnet.gluon.model_zoo.vision.resnet101_v1"><code class="xref py py-obj docutils literal"><span class="pre">resnet101_v1</span></code></a></td>
<td>ResNet-101 V1 model from <a class="reference external" href="http://arxiv.org/abs/1512.03385">“Deep Residual Learning for Image Recognition”</a> paper.</td>
</tr>
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.resnet152_v1" title="mxnet.gluon.model_zoo.vision.resnet152_v1"><code class="xref py py-obj docutils literal"><span class="pre">resnet152_v1</span></code></a></td>
<td>ResNet-152 V1 model from <a class="reference external" href="http://arxiv.org/abs/1512.03385">“Deep Residual Learning for Image Recognition”</a> paper.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.resnet18_v2" title="mxnet.gluon.model_zoo.vision.resnet18_v2"><code class="xref py py-obj docutils literal"><span class="pre">resnet18_v2</span></code></a></td>
<td>ResNet-18 V2 model from <a class="reference external" href="https://arxiv.org/abs/1603.05027">“Identity Mappings in Deep Residual Networks”</a> paper.</td>
</tr>
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.resnet34_v2" title="mxnet.gluon.model_zoo.vision.resnet34_v2"><code class="xref py py-obj docutils literal"><span class="pre">resnet34_v2</span></code></a></td>
<td>ResNet-34 V2 model from <a class="reference external" href="https://arxiv.org/abs/1603.05027">“Identity Mappings in Deep Residual Networks”</a> paper.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.resnet50_v2" title="mxnet.gluon.model_zoo.vision.resnet50_v2"><code class="xref py py-obj docutils literal"><span class="pre">resnet50_v2</span></code></a></td>
<td>ResNet-50 V2 model from <a class="reference external" href="https://arxiv.org/abs/1603.05027">“Identity Mappings in Deep Residual Networks”</a> paper.</td>
</tr>
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.resnet101_v2" title="mxnet.gluon.model_zoo.vision.resnet101_v2"><code class="xref py py-obj docutils literal"><span class="pre">resnet101_v2</span></code></a></td>
<td>ResNet-101 V2 model from <a class="reference external" href="https://arxiv.org/abs/1603.05027">“Identity Mappings in Deep Residual Networks”</a> paper.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.resnet152_v2" title="mxnet.gluon.model_zoo.vision.resnet152_v2"><code class="xref py py-obj docutils literal"><span class="pre">resnet152_v2</span></code></a></td>
<td>ResNet-152 V2 model from <a class="reference external" href="https://arxiv.org/abs/1603.05027">“Identity Mappings in Deep Residual Networks”</a> paper.</td>
</tr>
</tbody>
</table>
<table border="1" class="longtable docutils">
<colgroup>
<col width="10%"/>
<col width="90%"/>
</colgroup>
<tbody valign="top">
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.ResNetV1" title="mxnet.gluon.model_zoo.vision.ResNetV1"><code class="xref py py-obj docutils literal"><span class="pre">ResNetV1</span></code></a></td>
<td>ResNet V1 model from <a class="reference external" href="http://arxiv.org/abs/1512.03385">“Deep Residual Learning for Image Recognition”</a> paper.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.ResNetV2" title="mxnet.gluon.model_zoo.vision.ResNetV2"><code class="xref py py-obj docutils literal"><span class="pre">ResNetV2</span></code></a></td>
<td>ResNet V2 model from <a class="reference external" href="https://arxiv.org/abs/1603.05027">“Identity Mappings in Deep Residual Networks”</a> paper.</td>
</tr>
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.BasicBlockV1" title="mxnet.gluon.model_zoo.vision.BasicBlockV1"><code class="xref py py-obj docutils literal"><span class="pre">BasicBlockV1</span></code></a></td>
<td>BasicBlock V1 from <a class="reference external" href="http://arxiv.org/abs/1512.03385">“Deep Residual Learning for Image Recognition”</a> paper.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.BasicBlockV2" title="mxnet.gluon.model_zoo.vision.BasicBlockV2"><code class="xref py py-obj docutils literal"><span class="pre">BasicBlockV2</span></code></a></td>
<td>BasicBlock V2 from <a class="reference external" href="https://arxiv.org/abs/1603.05027">“Identity Mappings in Deep Residual Networks”</a> paper.</td>
</tr>
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.BottleneckV1" title="mxnet.gluon.model_zoo.vision.BottleneckV1"><code class="xref py py-obj docutils literal"><span class="pre">BottleneckV1</span></code></a></td>
<td>Bottleneck V1 from <a class="reference external" href="http://arxiv.org/abs/1512.03385">“Deep Residual Learning for Image Recognition”</a> paper.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.BottleneckV2" title="mxnet.gluon.model_zoo.vision.BottleneckV2"><code class="xref py py-obj docutils literal"><span class="pre">BottleneckV2</span></code></a></td>
<td>Bottleneck V2 from <a class="reference external" href="https://arxiv.org/abs/1603.05027">“Identity Mappings in Deep Residual Networks”</a> paper.</td>
</tr>
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.get_resnet" title="mxnet.gluon.model_zoo.vision.get_resnet"><code class="xref py py-obj docutils literal"><span class="pre">get_resnet</span></code></a></td>
<td>ResNet V1 model from <a class="reference external" href="http://arxiv.org/abs/1512.03385">“Deep Residual Learning for Image Recognition”</a> paper.</td>
</tr>
</tbody>
</table>
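<p><code class="docutils literal"><span class="pre">get_resnet</span></code> is the shared factory behind the <code class="docutils literal"><span class="pre">resnet*_v1</span></code> and <code class="docutils literal"><span class="pre">resnet*_v2</span></code> helpers. A brief sketch, assuming it takes the ResNet version and depth as its leading arguments (the particular combination shown is just an example):</p>
<div class="highlight-python"><div class="highlight"><pre>from mxnet.gluon.model_zoo import vision

# roughly equivalent to vision.resnet50_v2(pretrained=True)
net = vision.get_resnet(2, 50, pretrained=True)
</pre></div>
</div>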
</div>
<div class="section" id="vgg">
<span id="vgg"></span><h4>VGG<a class="headerlink" href="#vgg" title="Permalink to this headline"></a></h4>
<table border="1" class="longtable docutils">
<colgroup>
<col width="10%"/>
<col width="90%"/>
</colgroup>
<tbody valign="top">
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.vgg11" title="mxnet.gluon.model_zoo.vision.vgg11"><code class="xref py py-obj docutils literal"><span class="pre">vgg11</span></code></a></td>
<td>VGG-11 model from the <a class="reference external" href="https://arxiv.org/abs/1409.1556">“Very Deep Convolutional Networks for Large-Scale Image Recognition”</a> paper.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.vgg13" title="mxnet.gluon.model_zoo.vision.vgg13"><code class="xref py py-obj docutils literal"><span class="pre">vgg13</span></code></a></td>
<td>VGG-13 model from the <a class="reference external" href="https://arxiv.org/abs/1409.1556">“Very Deep Convolutional Networks for Large-Scale Image Recognition”</a> paper.</td>
</tr>
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.vgg16" title="mxnet.gluon.model_zoo.vision.vgg16"><code class="xref py py-obj docutils literal"><span class="pre">vgg16</span></code></a></td>
<td>VGG-16 model from the <a class="reference external" href="https://arxiv.org/abs/1409.1556">“Very Deep Convolutional Networks for Large-Scale Image Recognition”</a> paper.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.vgg19" title="mxnet.gluon.model_zoo.vision.vgg19"><code class="xref py py-obj docutils literal"><span class="pre">vgg19</span></code></a></td>
<td>VGG-19 model from the <a class="reference external" href="https://arxiv.org/abs/1409.1556">“Very Deep Convolutional Networks for Large-Scale Image Recognition”</a> paper.</td>
</tr>
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.vgg11_bn" title="mxnet.gluon.model_zoo.vision.vgg11_bn"><code class="xref py py-obj docutils literal"><span class="pre">vgg11_bn</span></code></a></td>
<td>VGG-11 model with batch normalization from the <a class="reference external" href="https://arxiv.org/abs/1409.1556">“Very Deep Convolutional Networks for Large-Scale Image Recognition”</a> paper.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.vgg13_bn" title="mxnet.gluon.model_zoo.vision.vgg13_bn"><code class="xref py py-obj docutils literal"><span class="pre">vgg13_bn</span></code></a></td>
<td>VGG-13 model with batch normalization from the <a class="reference external" href="https://arxiv.org/abs/1409.1556">“Very Deep Convolutional Networks for Large-Scale Image Recognition”</a> paper.</td>
</tr>
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.vgg16_bn" title="mxnet.gluon.model_zoo.vision.vgg16_bn"><code class="xref py py-obj docutils literal"><span class="pre">vgg16_bn</span></code></a></td>
<td>VGG-16 model with batch normalization from the <a class="reference external" href="https://arxiv.org/abs/1409.1556">“Very Deep Convolutional Networks for Large-Scale Image Recognition”</a> paper.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.vgg19_bn" title="mxnet.gluon.model_zoo.vision.vgg19_bn"><code class="xref py py-obj docutils literal"><span class="pre">vgg19_bn</span></code></a></td>
<td>VGG-19 model with batch normalization from the <a class="reference external" href="https://arxiv.org/abs/1409.1556">“Very Deep Convolutional Networks for Large-Scale Image Recognition”</a> paper.</td>
</tr>
</tbody>
</table>
<table border="1" class="longtable docutils">
<colgroup>
<col width="10%"/>
<col width="90%"/>
</colgroup>
<tbody valign="top">
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.VGG" title="mxnet.gluon.model_zoo.vision.VGG"><code class="xref py py-obj docutils literal"><span class="pre">VGG</span></code></a></td>
<td>VGG model from the <a class="reference external" href="https://arxiv.org/abs/1409.1556">“Very Deep Convolutional Networks for Large-Scale Image Recognition”</a> paper.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.get_vgg" title="mxnet.gluon.model_zoo.vision.get_vgg"><code class="xref py py-obj docutils literal"><span class="pre">get_vgg</span></code></a></td>
<td>VGG model from the <a class="reference external" href="https://arxiv.org/abs/1409.1556">“Very Deep Convolutional Networks for Large-Scale Image Recognition”</a> paper.</td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="alexnet">
<span id="alexnet"></span><h4>Alexnet<a class="headerlink" href="#alexnet" title="Permalink to this headline"></a></h4>
<table border="1" class="longtable docutils">
<colgroup>
<col width="10%"/>
<col width="90%"/>
</colgroup>
<tbody valign="top">
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.alexnet" title="mxnet.gluon.model_zoo.vision.alexnet"><code class="xref py py-obj docutils literal"><span class="pre">alexnet</span></code></a></td>
<td>AlexNet model from the <a class="reference external" href="https://arxiv.org/abs/1404.5997">“One weird trick...”</a> paper.</td>
</tr>
</tbody>
</table>
<table border="1" class="longtable docutils">
<colgroup>
<col width="10%"/>
<col width="90%"/>
</colgroup>
<tbody valign="top">
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.AlexNet" title="mxnet.gluon.model_zoo.vision.AlexNet"><code class="xref py py-obj docutils literal"><span class="pre">AlexNet</span></code></a></td>
<td>AlexNet model from the <a class="reference external" href="https://arxiv.org/abs/1404.5997">“One weird trick...”</a> paper.</td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="densenet">
<span id="densenet"></span><h4>DenseNet<a class="headerlink" href="#densenet" title="Permalink to this headline"></a></h4>
<table border="1" class="longtable docutils">
<colgroup>
<col width="10%"/>
<col width="90%"/>
</colgroup>
<tbody valign="top">
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.densenet121" title="mxnet.gluon.model_zoo.vision.densenet121"><code class="xref py py-obj docutils literal"><span class="pre">densenet121</span></code></a></td>
<td>Densenet-BC 121-layer model from the <a class="reference external" href="https://arxiv.org/pdf/1608.06993.pdf">“Densely Connected Convolutional Networks”</a> paper.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.densenet161" title="mxnet.gluon.model_zoo.vision.densenet161"><code class="xref py py-obj docutils literal"><span class="pre">densenet161</span></code></a></td>
<td>Densenet-BC 161-layer model from the <a class="reference external" href="https://arxiv.org/pdf/1608.06993.pdf">“Densely Connected Convolutional Networks”</a> paper.</td>
</tr>
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.densenet169" title="mxnet.gluon.model_zoo.vision.densenet169"><code class="xref py py-obj docutils literal"><span class="pre">densenet169</span></code></a></td>
<td>Densenet-BC 169-layer model from the <a class="reference external" href="https://arxiv.org/pdf/1608.06993.pdf">“Densely Connected Convolutional Networks”</a> paper.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.densenet201" title="mxnet.gluon.model_zoo.vision.densenet201"><code class="xref py py-obj docutils literal"><span class="pre">densenet201</span></code></a></td>
<td>Densenet-BC 201-layer model from the <a class="reference external" href="https://arxiv.org/pdf/1608.06993.pdf">“Densely Connected Convolutional Networks”</a> paper.</td>
</tr>
</tbody>
</table>
<table border="1" class="longtable docutils">
<colgroup>
<col width="10%"/>
<col width="90%"/>
</colgroup>
<tbody valign="top">
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.DenseNet" title="mxnet.gluon.model_zoo.vision.DenseNet"><code class="xref py py-obj docutils literal"><span class="pre">DenseNet</span></code></a></td>
<td>Densenet-BC model from the <a class="reference external" href="https://arxiv.org/pdf/1608.06993.pdf">“Densely Connected Convolutional Networks”</a> paper.</td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="squeezenet">
<span id="squeezenet"></span><h4>SqueezeNet<a class="headerlink" href="#squeezenet" title="Permalink to this headline"></a></h4>
<table border="1" class="longtable docutils">
<colgroup>
<col width="10%"/>
<col width="90%"/>
</colgroup>
<tbody valign="top">
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.squeezenet1_0" title="mxnet.gluon.model_zoo.vision.squeezenet1_0"><code class="xref py py-obj docutils literal"><span class="pre">squeezenet1_0</span></code></a></td>
<td>SqueezeNet 1.0 model from the <a class="reference external" href="https://arxiv.org/abs/1602.07360">“SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and &lt;0.5MB model size”</a> paper.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.squeezenet1_1" title="mxnet.gluon.model_zoo.vision.squeezenet1_1"><code class="xref py py-obj docutils literal"><span class="pre">squeezenet1_1</span></code></a></td>
<td>SqueezeNet 1.1 model from the <a class="reference external" href="https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1">official SqueezeNet repo</a>.</td>
</tr>
</tbody>
</table>
<table border="1" class="longtable docutils">
<colgroup>
<col width="10%"/>
<col width="90%"/>
</colgroup>
<tbody valign="top">
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.SqueezeNet" title="mxnet.gluon.model_zoo.vision.SqueezeNet"><code class="xref py py-obj docutils literal"><span class="pre">SqueezeNet</span></code></a></td>
<td>SqueezeNet model from the <a class="reference external" href="https://arxiv.org/abs/1602.07360">“SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and &lt;0.5MB model size”</a> paper.</td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="inception">
<span id="inception"></span><h4>Inception<a class="headerlink" href="#inception" title="Permalink to this headline"></a></h4>
<table border="1" class="longtable docutils">
<colgroup>
<col width="10%"/>
<col width="90%"/>
</colgroup>
<tbody valign="top">
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.inception_v3" title="mxnet.gluon.model_zoo.vision.inception_v3"><code class="xref py py-obj docutils literal"><span class="pre">inception_v3</span></code></a></td>
<td>Inception v3 model from <a class="reference external" href="http://arxiv.org/abs/1512.00567">“Rethinking the Inception Architecture for Computer Vision”</a> paper.</td>
</tr>
</tbody>
</table>
<table border="1" class="longtable docutils">
<colgroup>
<col width="10%"/>
<col width="90%"/>
</colgroup>
<tbody valign="top">
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.Inception3" title="mxnet.gluon.model_zoo.vision.Inception3"><code class="xref py py-obj docutils literal"><span class="pre">Inception3</span></code></a></td>
<td>Inception v3 model from <a class="reference external" href="http://arxiv.org/abs/1512.00567">“Rethinking the Inception Architecture for Computer Vision”</a> paper.</td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="mobilenet">
<span id="mobilenet"></span><h4>MobileNet<a class="headerlink" href="#mobilenet" title="Permalink to this headline"></a></h4>
<table border="1" class="longtable docutils">
<colgroup>
<col width="10%"/>
<col width="90%"/>
</colgroup>
<tbody valign="top">
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.mobilenet1_0" title="mxnet.gluon.model_zoo.vision.mobilenet1_0"><code class="xref py py-obj docutils literal"><span class="pre">mobilenet1_0</span></code></a></td>
<td>MobileNet model from the <a class="reference external" href="https://arxiv.org/abs/1704.04861">“MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications”</a> paper, with width multiplier 1.0.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.mobilenet0_75" title="mxnet.gluon.model_zoo.vision.mobilenet0_75"><code class="xref py py-obj docutils literal"><span class="pre">mobilenet0_75</span></code></a></td>
<td>MobileNet model from the <a class="reference external" href="https://arxiv.org/abs/1704.04861">“MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications”</a> paper, with width multiplier 0.75.</td>
</tr>
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.mobilenet0_5" title="mxnet.gluon.model_zoo.vision.mobilenet0_5"><code class="xref py py-obj docutils literal"><span class="pre">mobilenet0_5</span></code></a></td>
<td>MobileNet model from the <a class="reference external" href="https://arxiv.org/abs/1704.04861">“MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications”</a> paper, with width multiplier 0.5.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.mobilenet0_25" title="mxnet.gluon.model_zoo.vision.mobilenet0_25"><code class="xref py py-obj docutils literal"><span class="pre">mobilenet0_25</span></code></a></td>
<td>MobileNet model from the <a class="reference external" href="https://arxiv.org/abs/1704.04861">“MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications”</a> paper, with width multiplier 0.25.</td>
</tr>
</tbody>
</table>
<table border="1" class="longtable docutils">
<colgroup>
<col width="10%"/>
<col width="90%"/>
</colgroup>
<tbody valign="top">
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.model_zoo.vision.MobileNet" title="mxnet.gluon.model_zoo.vision.MobileNet"><code class="xref py py-obj docutils literal"><span class="pre">MobileNet</span></code></a></td>
<td>MobileNet model from the <a class="reference external" href="https://arxiv.org/abs/1704.04861">“MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications”</a> paper.</td>
</tr>
</tbody>
</table>
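<p>Each width multiplier in the table above maps to its own constructor, so the variants can be instantiated directly. A short sketch:</p>
<div class="highlight-python"><div class="highlight"><pre>from mxnet.gluon.model_zoo import vision

small = vision.mobilenet0_25(pretrained=True)   # width multiplier 0.25
full = vision.mobilenet1_0(pretrained=True)     # width multiplier 1.0
</pre></div>
</div>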
</div>
</div>
</div>
<div class="section" id="api-reference">
<span id="api-reference"></span><h2>API Reference<a class="headerlink" href="#api-reference" title="Permalink to this headline"></a></h2>
<script src="../../../_static/js/auto_module_index.js" type="text/javascript"></script><span class="target" id="module-mxnet.gluon.model_zoo"></span><p>Predefined and pretrained models.</p>
<span class="target" id="module-mxnet.gluon.model_zoo.vision"></span><p>Module for pre-defined neural network models.</p>
<p>This module contains definitions for the following model architectures:
- <a class="reference external" href="https://arxiv.org/abs/1404.5997">AlexNet</a>
- <a class="reference external" href="https://arxiv.org/abs/1608.06993">DenseNet</a>
- <a class="reference external" href="http://arxiv.org/abs/1512.00567">Inception V3</a>
- <a class="reference external" href="https://arxiv.org/abs/1512.03385">ResNet V1</a>
- <a class="reference external" href="https://arxiv.org/abs/1512.03385">ResNet V2</a>
- <a class="reference external" href="https://arxiv.org/abs/1602.07360">SqueezeNet</a>
- <a class="reference external" href="https://arxiv.org/abs/1409.1556">VGG</a>
- <a class="reference external" href="https://arxiv.org/abs/1704.04861">MobileNet</a></p>
<p>You can construct a model with random weights by calling its constructor:</p>
<div class="code highlight-python"><div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">mxnet.gluon.model_zoo</span> <span class="kn">import</span> <span class="n">vision</span>
<span class="n">resnet18</span> <span class="o">=</span> <span class="n">vision</span><span class="o">.</span><span class="n">resnet18_v1</span><span class="p">()</span>
<span class="n">alexnet</span> <span class="o">=</span> <span class="n">vision</span><span class="o">.</span><span class="n">alexnet</span><span class="p">()</span>
<span class="n">squeezenet</span> <span class="o">=</span> <span class="n">vision</span><span class="o">.</span><span class="n">squeezenet1_0</span><span class="p">()</span>
<span class="n">densenet</span> <span class="o">=</span> <span class="n">vision</span><span class="o">.</span><span class="n">densenet_161</span><span class="p">()</span>
</pre></div>
</div>
<p>We provide pre-trained weights for all of the listed models.
These models can be constructed by passing <code class="docutils literal"><span class="pre">pretrained=True</span></code>:</p>
<div class="code highlight-python"><div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">mxnet.gluon.model_zoo</span> <span class="kn">import</span> <span class="n">vision</span>
<span class="n">resnet18</span> <span class="o">=</span> <span class="n">vision</span><span class="o">.</span><span class="n">resnet18_v1</span><span class="p">(</span><span class="n">pretrained</span><span class="o">=</span><span class="bp">True</span><span class="p">)</span>
<span class="n">alexnet</span> <span class="o">=</span> <span class="n">vision</span><span class="o">.</span><span class="n">alexnet</span><span class="p">(</span><span class="n">pretrained</span><span class="o">=</span><span class="bp">True</span><span class="p">)</span>
</pre></div>
</div>
<p>All pre-trained models expect input images normalized in the same way,
i.e. mini-batches of 3-channel RGB images of shape (N x 3 x H x W),
where N is the batch size, and H and W are expected to be at least 224.
The images have to be loaded into a range of [0, 1] and then normalized
using <code class="docutils literal"><span class="pre">mean</span> <span class="pre">=</span> <span class="pre">[0.485,</span> <span class="pre">0.456,</span> <span class="pre">0.406]</span></code> and <code class="docutils literal"><span class="pre">std</span> <span class="pre">=</span> <span class="pre">[0.229,</span> <span class="pre">0.224,</span> <span class="pre">0.225]</span></code>.
This transformation should preferably happen during preprocessing. You can use
<code class="docutils literal"><span class="pre">mx.image.color_normalize</span></code> for this transformation:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="n">image</span> <span class="o">=</span> <span class="n">image</span><span class="o">/</span><span class="mi">255</span>
<span class="n">normalized</span> <span class="o">=</span> <span class="n">mx</span><span class="o">.</span><span class="n">image</span><span class="o">.</span><span class="n">color_normalize</span><span class="p">(</span><span class="n">image</span><span class="p">,</span>
<span class="n">mean</span><span class="o">=</span><span class="n">mx</span><span class="o">.</span><span class="n">nd</span><span class="o">.</span><span class="n">array</span><span class="p">([</span><span class="mf">0.485</span><span class="p">,</span> <span class="mf">0.456</span><span class="p">,</span> <span class="mf">0.406</span><span class="p">]),</span>
<span class="n">std</span><span class="o">=</span><span class="n">mx</span><span class="o">.</span><span class="n">nd</span><span class="o">.</span><span class="n">array</span><span class="p">([</span><span class="mf">0.229</span><span class="p">,</span> <span class="mf">0.224</span><span class="p">,</span> <span class="mf">0.225</span><span class="p">]))</span>
</pre></div>
</div>
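<p>As a rough end-to-end sketch, the snippet below runs a forward pass through a pretrained network on a synthetic 224x224 image standing in for a decoded photo; the random input, the softmax call and the argmax are illustrative choices for this example rather than requirements of the API:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>import mxnet as mx
from mxnet.gluon.model_zoo import vision

# Downloads the pretrained ImageNet weights on first use.
net = vision.resnet18_v1(pretrained=True)

# Synthetic 224x224 RGB image in HWC layout, already scaled to [0, 1].
image = mx.nd.random.uniform(shape=(224, 224, 3))
normalized = mx.image.color_normalize(image,
                                      mean=mx.nd.array([0.485, 0.456, 0.406]),
                                      std=mx.nd.array([0.229, 0.224, 0.225]))

# Reorder to CHW and add the batch dimension before the forward pass.
batch = mx.nd.transpose(normalized, axes=(2, 0, 1)).expand_dims(axis=0)
prob = mx.nd.softmax(net(batch))   # shape (1, 1000)
top1 = prob.argmax(axis=1)
</pre></div>
</div>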
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.get_model">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">get_model</code><span class="sig-paren">(</span><em>name</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision.html#get_model"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.get_model" title="Permalink to this definition"></a></dt>
<dd><p>Returns a pre-defined model by name</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>name</strong> (<em>str</em>) – Name of the model.</li>
<li><strong>pretrained</strong> (<em>bool</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>classes</strong> (<em>int</em>) – Number of classes for the output layer.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body"><p class="first">The model.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last"><a class="reference internal" href="gluon.html#mxnet.gluon.HybridBlock" title="mxnet.gluon.HybridBlock">HybridBlock</a></p>
</td>
</tr>
</tbody>
</table>
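<p>As a small usage sketch, <code class="docutils literal"><span class="pre">get_model</span></code> selects an architecture by its string name at runtime; the names used below are assumed to match the lowercase factory-function names documented further down on this page:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>from mxnet.gluon.model_zoo.vision import get_model

# Same network as vision.resnet18_v1(pretrained=True), selected by name.
net = get_model('resnet18_v1', pretrained=True)

# Randomly initialized variant with a custom output layer, e.g. for a
# 10-class dataset (pretrained weights are not loaded in this case).
net10 = get_model('resnet18_v1', classes=10)
</pre></div>
</div>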
</dd></dl>
<dl class="class">
<dt id="mxnet.gluon.model_zoo.vision.AlexNet">
<em class="property">class </em><code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">AlexNet</code><span class="sig-paren">(</span><em>classes=1000</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/alexnet.html#AlexNet"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.AlexNet" title="Permalink to this definition"></a></dt>
<dd><p>AlexNet model from the <a class="reference external" href="https://arxiv.org/abs/1404.5997">“One weird trick...”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><strong>classes</strong> (<em>int, default 1000</em>) – Number of classes for the output layer.</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="class">
<dt id="mxnet.gluon.model_zoo.vision.BasicBlockV1">
<em class="property">class </em><code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">BasicBlockV1</code><span class="sig-paren">(</span><em>channels</em>, <em>stride</em>, <em>downsample=False</em>, <em>in_channels=0</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/resnet.html#BasicBlockV1"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.BasicBlockV1" title="Permalink to this definition"></a></dt>
<dd><p>BasicBlock V1 from the <a class="reference external" href="http://arxiv.org/abs/1512.03385">“Deep Residual Learning for Image Recognition”</a> paper.
This is used in ResNet V1 for the 18- and 34-layer variants.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>channels</strong> (<em>int</em>) – Number of output channels.</li>
<li><strong>stride</strong> (<em>int</em>) – Stride size.</li>
<li><strong>downsample</strong> (<em>bool, default False</em>) – Whether to downsample the input.</li>
<li><strong>in_channels</strong> (<em>int, default 0</em>) – Number of input channels. Default is 0, to infer from the graph.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="class">
<dt id="mxnet.gluon.model_zoo.vision.BasicBlockV2">
<em class="property">class </em><code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">BasicBlockV2</code><span class="sig-paren">(</span><em>channels</em>, <em>stride</em>, <em>downsample=False</em>, <em>in_channels=0</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/resnet.html#BasicBlockV2"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.BasicBlockV2" title="Permalink to this definition"></a></dt>
<dd><p>BasicBlock V2 from the
<a class="reference external" href="https://arxiv.org/abs/1603.05027">“Identity Mappings in Deep Residual Networks”</a> paper.
This is used in ResNet V2 for the 18- and 34-layer variants.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>channels</strong> (<em>int</em>) – Number of output channels.</li>
<li><strong>stride</strong> (<em>int</em>) – Stride size.</li>
<li><strong>downsample</strong> (<em>bool, default False</em>) – Whether to downsample the input.</li>
<li><strong>in_channels</strong> (<em>int, default 0</em>) – Number of input channels. Default is 0, to infer from the graph.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="class">
<dt id="mxnet.gluon.model_zoo.vision.BottleneckV1">
<em class="property">class </em><code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">BottleneckV1</code><span class="sig-paren">(</span><em>channels</em>, <em>stride</em>, <em>downsample=False</em>, <em>in_channels=0</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/resnet.html#BottleneckV1"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.BottleneckV1" title="Permalink to this definition"></a></dt>
<dd><p>Bottleneck V1 from the <a class="reference external" href="http://arxiv.org/abs/1512.03385">“Deep Residual Learning for Image Recognition”</a> paper.
This is used in ResNet V1 for the 50-, 101- and 152-layer variants.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>channels</strong> (<em>int</em>) – Number of output channels.</li>
<li><strong>stride</strong> (<em>int</em>) – Stride size.</li>
<li><strong>downsample</strong> (<em>bool, default False</em>) – Whether to downsample the input.</li>
<li><strong>in_channels</strong> (<em>int, default 0</em>) – Number of input channels. Default is 0, to infer from the graph.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="class">
<dt id="mxnet.gluon.model_zoo.vision.BottleneckV2">
<em class="property">class </em><code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">BottleneckV2</code><span class="sig-paren">(</span><em>channels</em>, <em>stride</em>, <em>downsample=False</em>, <em>in_channels=0</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/resnet.html#BottleneckV2"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.BottleneckV2" title="Permalink to this definition"></a></dt>
<dd><p>Bottleneck V2 from the
<a class="reference external" href="https://arxiv.org/abs/1603.05027">“Identity Mappings in Deep Residual Networks”</a> paper.
This is used in ResNet V2 for the 50-, 101- and 152-layer variants.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>channels</strong> (<em>int</em>) – Number of output channels.</li>
<li><strong>stride</strong> (<em>int</em>) – Stride size.</li>
<li><strong>downsample</strong> (<em>bool, default False</em>) – Whether to downsample the input.</li>
<li><strong>in_channels</strong> (<em>int, default 0</em>) – Number of input channels. Default is 0, to infer from the graph.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="class">
<dt id="mxnet.gluon.model_zoo.vision.DenseNet">
<em class="property">class </em><code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">DenseNet</code><span class="sig-paren">(</span><em>num_init_features</em>, <em>growth_rate</em>, <em>block_config</em>, <em>bn_size=4</em>, <em>dropout=0</em>, <em>classes=1000</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/densenet.html#DenseNet"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.DenseNet" title="Permalink to this definition"></a></dt>
<dd><p>Densenet-BC model from the
<a class="reference external" href="https://arxiv.org/pdf/1608.06993.pdf">“Densely Connected Convolutional Networks”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>num_init_features</strong> (<em>int</em>) – Number of filters to learn in the first convolution layer.</li>
<li><strong>growth_rate</strong> (<em>int</em>) – Number of filters to add each layer (<cite>k</cite> in the paper).</li>
<li><strong>block_config</strong> (<em>list of int</em>) – List of integers for numbers of layers in each pooling block.</li>
<li><strong>bn_size</strong> (<em>int, default 4</em>) – Multiplicative factor for the number of bottleneck layers
(i.e. bn_size * k features in the bottleneck layer).</li>
<li><strong>dropout</strong> (<em>float, default 0</em>) – Rate of dropout after each dense layer.</li>
<li><strong>classes</strong> (<em>int, default 1000</em>) – Number of classification classes.</li>
</ul>
</td>
</tr>
</tbody>
</table>
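<p>To make the constructor arguments concrete, the sketch below builds a DenseNet directly from its configuration; the values shown are the ones commonly quoted for the 121-layer variant and are given purely for illustration (the <code class="docutils literal"><span class="pre">densenet121</span></code> factory below is the usual entry point):</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>from mxnet.gluon.model_zoo.vision import DenseNet

# Illustrative Densenet-BC 121 configuration: 64 initial filters,
# growth rate k=32, four dense blocks with 6/12/24/16 layers.
net = DenseNet(num_init_features=64, growth_rate=32,
               block_config=[6, 12, 24, 16], dropout=0, classes=1000)
net.initialize()
</pre></div>
</div>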
</dd></dl>
<dl class="class">
<dt id="mxnet.gluon.model_zoo.vision.Inception3">
<em class="property">class </em><code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">Inception3</code><span class="sig-paren">(</span><em>classes=1000</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/inception.html#Inception3"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.Inception3" title="Permalink to this definition"></a></dt>
<dd><p>Inception v3 model from
<a class="reference external" href="http://arxiv.org/abs/1512.00567">“Rethinking the Inception Architecture for Computer Vision”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><strong>classes</strong> (<em>int, default 1000</em>) – Number of classification classes.</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="class">
<dt id="mxnet.gluon.model_zoo.vision.MobileNet">
<em class="property">class </em><code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">MobileNet</code><span class="sig-paren">(</span><em>multiplier=1.0</em>, <em>classes=1000</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/mobilenet.html#MobileNet"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.MobileNet" title="Permalink to this definition"></a></dt>
<dd><p>MobileNet model from the
<a class="reference external" href="https://arxiv.org/abs/1704.04861">“MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>multiplier</strong> (<em>float, default 1.0</em>) – The width multiplier for controlling the model size. Only multipliers that are no
less than 0.25 are supported. The actual number of channels is equal to the original
channel size multiplied by this multiplier.</li>
<li><strong>classes</strong> (<em>int, default 1000</em>) – Number of classes for the output layer.</li>
</ul>
</td>
</tr>
</tbody>
</table>
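<p>The width multiplier scales the channel count of every layer, trading accuracy for model size; a minimal construction sketch:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>from mxnet.gluon.model_zoo.vision import MobileNet

net_full = MobileNet(multiplier=1.0)   # baseline channel widths
net_slim = MobileNet(multiplier=0.5)   # every layer has half the channels
net_slim.initialize()
</pre></div>
</div>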
</dd></dl>
<dl class="class">
<dt id="mxnet.gluon.model_zoo.vision.ResNetV1">
<em class="property">class </em><code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">ResNetV1</code><span class="sig-paren">(</span><em>block</em>, <em>layers</em>, <em>channels</em>, <em>classes=1000</em>, <em>thumbnail=False</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/resnet.html#ResNetV1"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.ResNetV1" title="Permalink to this definition"></a></dt>
<dd><p>ResNet V1 model from
<a class="reference external" href="http://arxiv.org/abs/1512.03385">“Deep Residual Learning for Image Recognition”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>block</strong> (<a class="reference internal" href="gluon.html#mxnet.gluon.HybridBlock" title="mxnet.gluon.HybridBlock"><em>HybridBlock</em></a>) – Class for the residual block. Options are BasicBlockV1, BottleneckV1.</li>
<li><strong>layers</strong> (<em>list of int</em>) – Numbers of layers in each block</li>
<li><strong>channels</strong> (<em>list of int</em>) – Numbers of channels in each block. Length should be one larger than layers list.</li>
<li><strong>classes</strong> (<em>int, default 1000</em>) – Number of classification classes.</li>
<li><strong>thumbnail</strong> (<em>bool, default False</em>) – Enable thumbnail.</li>
</ul>
</td>
</tr>
</tbody>
</table>
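<p>A construction sketch using the residual building blocks; the layer and channel lists shown are the ones commonly used for the 18-layer variant and are illustrative only (the <code class="docutils literal"><span class="pre">resnet18_v1</span></code> factory below is the usual entry point). Note that <code class="docutils literal"><span class="pre">channels</span></code> has one more entry than <code class="docutils literal"><span class="pre">layers</span></code> because it includes the stem:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>from mxnet.gluon.model_zoo.vision import ResNetV1, BasicBlockV1

# Illustrative ResNet-18 V1 configuration: four stages of two basic blocks each.
net = ResNetV1(BasicBlockV1,
               layers=[2, 2, 2, 2],
               channels=[64, 64, 128, 256, 512],
               classes=1000)
net.initialize()
</pre></div>
</div>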
</dd></dl>
<dl class="class">
<dt id="mxnet.gluon.model_zoo.vision.ResNetV2">
<em class="property">class </em><code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">ResNetV2</code><span class="sig-paren">(</span><em>block</em>, <em>layers</em>, <em>channels</em>, <em>classes=1000</em>, <em>thumbnail=False</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/resnet.html#ResNetV2"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.ResNetV2" title="Permalink to this definition"></a></dt>
<dd><p>ResNet V2 model from
<a class="reference external" href="https://arxiv.org/abs/1603.05027">“Identity Mappings in Deep Residual Networks”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>block</strong> (<a class="reference internal" href="gluon.html#mxnet.gluon.HybridBlock" title="mxnet.gluon.HybridBlock"><em>HybridBlock</em></a>) – Class for the residual block. Options are BasicBlockV1, BottleneckV1.</li>
<li><strong>layers</strong> (<em>list of int</em>) – Numbers of layers in each block</li>
<li><strong>channels</strong> (<em>list of int</em>) – Numbers of channels in each block. Length should be one larger than layers list.</li>
<li><strong>classes</strong> (<em>int, default 1000</em>) – Number of classification classes.</li>
<li><strong>thumbnail</strong> (<em>bool, default False</em>) – Enable thumbnail.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="class">
<dt id="mxnet.gluon.model_zoo.vision.SqueezeNet">
<em class="property">class </em><code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">SqueezeNet</code><span class="sig-paren">(</span><em>version</em>, <em>classes=1000</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/squeezenet.html#SqueezeNet"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.SqueezeNet" title="Permalink to this definition"></a></dt>
<dd><p>SqueezeNet model from the <a class="reference external" href="https://arxiv.org/abs/1602.07360">“SqueezeNet: AlexNet-level accuracy with 50x fewer parameters
and &lt;0.5MB model size”</a> paper.
SqueezeNet 1.1 model from the <a class="reference external" href="https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1">official SqueezeNet repo</a>.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>version</strong> (<em>str</em>) – Version of squeezenet. Options are ‘1.0’, ‘1.1’.</li>
<li><strong>classes</strong> (<em>int, default 1000</em>) – Number of classification classes.</li>
</ul>
</td>
</tr>
</tbody>
</table>
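<p>A minimal sketch of selecting the SqueezeNet version through the class constructor (the <code class="docutils literal"><span class="pre">squeezenet1_0</span></code> and <code class="docutils literal"><span class="pre">squeezenet1_1</span></code> factories below are the equivalent shortcuts):</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>from mxnet.gluon.model_zoo.vision import SqueezeNet

net_v10 = SqueezeNet('1.0')                 # original architecture
net_v11 = SqueezeNet('1.1', classes=1000)   # cheaper 1.1 variant
net_v11.initialize()
</pre></div>
</div>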
</dd></dl>
<dl class="class">
<dt id="mxnet.gluon.model_zoo.vision.VGG">
<em class="property">class </em><code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">VGG</code><span class="sig-paren">(</span><em>layers</em>, <em>filters</em>, <em>classes=1000</em>, <em>batch_norm=False</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/vgg.html#VGG"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.VGG" title="Permalink to this definition"></a></dt>
<dd><p>VGG model from the <a class="reference external" href="https://arxiv.org/abs/1409.1556">“Very Deep Convolutional Networks for Large-Scale Image Recognition”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>layers</strong> (<em>list of int</em>) – Numbers of layers in each feature block.</li>
<li><strong>filters</strong> (<em>list of int</em>) – Numbers of filters in each feature block. List length should match the layers.</li>
<li><strong>classes</strong> (<em>int, default 1000</em>) – Number of classification classes.</li>
<li><strong>batch_norm</strong> (<em>bool, default False</em>) – Use batch normalization.</li>
</ul>
</td>
</tr>
</tbody>
</table>
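<p>To illustrate the <code class="docutils literal"><span class="pre">layers</span></code>/<code class="docutils literal"><span class="pre">filters</span></code> arguments, the sketch below builds a VGG-11-style network; the two lists are illustrative values for that variant (the <code class="docutils literal"><span class="pre">vgg11</span></code> factory below is the usual way to get it):</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>from mxnet.gluon.model_zoo.vision import VGG

# Illustrative VGG-11 configuration: five feature blocks with 1/1/2/2/2
# convolution layers and 64/128/256/512/512 filters, with batch normalization.
net = VGG(layers=[1, 1, 2, 2, 2],
          filters=[64, 128, 256, 512, 512],
          batch_norm=True)
net.initialize()
</pre></div>
</div>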
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.alexnet">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">alexnet</code><span class="sig-paren">(</span><em>pretrained=False</em>, <em>ctx=cpu(0)</em>, <em>root='~/.mxnet/models'</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/alexnet.html#alexnet"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.alexnet" title="Permalink to this definition"></a></dt>
<dd><p>AlexNet model from the <a class="reference external" href="https://arxiv.org/abs/1404.5997">“One weird trick...”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.densenet121">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">densenet121</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/densenet.html#densenet121"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.densenet121" title="Permalink to this definition"></a></dt>
<dd><p>Densenet-BC 121-layer model from the
<a class="reference external" href="https://arxiv.org/pdf/1608.06993.pdf">“Densely Connected Convolutional Networks”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.densenet161">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">densenet161</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/densenet.html#densenet161"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.densenet161" title="Permalink to this definition"></a></dt>
<dd><p>Densenet-BC 161-layer model from the
<a class="reference external" href="https://arxiv.org/pdf/1608.06993.pdf">“Densely Connected Convolutional Networks”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.densenet169">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">densenet169</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/densenet.html#densenet169"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.densenet169" title="Permalink to this definition"></a></dt>
<dd><p>Densenet-BC 169-layer model from the
<a class="reference external" href="https://arxiv.org/pdf/1608.06993.pdf">“Densely Connected Convolutional Networks”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.densenet201">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">densenet201</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/densenet.html#densenet201"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.densenet201" title="Permalink to this definition"></a></dt>
<dd><p>Densenet-BC 201-layer model from the
<a class="reference external" href="https://arxiv.org/pdf/1608.06993.pdf">“Densely Connected Convolutional Networks”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.get_mobilenet">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">get_mobilenet</code><span class="sig-paren">(</span><em>multiplier</em>, <em>pretrained=False</em>, <em>ctx=cpu(0)</em>, <em>root='~/.mxnet/models'</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/mobilenet.html#get_mobilenet"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.get_mobilenet" title="Permalink to this definition"></a></dt>
<dd><p>MobileNet model from the
<a class="reference external" href="https://arxiv.org/abs/1704.04861">“MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>multiplier</strong> (<em>float</em>) – The width multiplier for controlling the model size. Only multipliers that are no
less than 0.25 are supported. The actual number of channels is equal to the original
channel size multiplied by this multiplier.</li>
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
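<p>A minimal sketch; <code class="docutils literal"><span class="pre">get_mobilenet</span></code> is the generic entry point behind the <code class="docutils literal"><span class="pre">mobilenet0_25</span></code> through <code class="docutils literal"><span class="pre">mobilenet1_0</span></code> factories below:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>from mxnet.gluon.model_zoo.vision import get_mobilenet

# Equivalent to calling mobilenet0_5(): width multiplier 0.5, random weights.
net = get_mobilenet(0.5)
net.initialize()
</pre></div>
</div>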
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.get_resnet">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">get_resnet</code><span class="sig-paren">(</span><em>version</em>, <em>num_layers</em>, <em>pretrained=False</em>, <em>ctx=cpu(0)</em>, <em>root='~/.mxnet/models'</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/resnet.html#get_resnet"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.get_resnet" title="Permalink to this definition"></a></dt>
<dd><p>ResNet V1 model from <a class="reference external" href="http://arxiv.org/abs/1512.03385">“Deep Residual Learning for Image Recognition”</a> paper.
ResNet V2 model from <a class="reference external" href="https://arxiv.org/abs/1603.05027">“Identity Mappings in Deep Residual Networks”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>version</strong> (<em>int</em>) – Version of ResNet. Options are 1, 2.</li>
<li><strong>num_layers</strong> (<em>int</em>) – Number of layers. Options are 18, 34, 50, 101, 152.</li>
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
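<p>A minimal sketch; <code class="docutils literal"><span class="pre">get_resnet</span></code> combines the version and depth selection that the individual <code class="docutils literal"><span class="pre">resnet*_v1</span></code>/<code class="docutils literal"><span class="pre">resnet*_v2</span></code> factories below expose one by one:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>from mxnet.gluon.model_zoo.vision import get_resnet

# ResNet-50, V1 (post-activation) variant, with pretrained ImageNet weights;
# equivalent to resnet50_v1(pretrained=True).
net = get_resnet(1, 50, pretrained=True)
</pre></div>
</div>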
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.get_vgg">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">get_vgg</code><span class="sig-paren">(</span><em>num_layers</em>, <em>pretrained=False</em>, <em>ctx=cpu(0)</em>, <em>root='~/.mxnet/models'</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/vgg.html#get_vgg"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.get_vgg" title="Permalink to this definition"></a></dt>
<dd><p>VGG model from the <a class="reference external" href="https://arxiv.org/abs/1409.1556">“Very Deep Convolutional Networks for Large-Scale Image Recognition”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>num_layers</strong> (<em>int</em>) – Number of layers for the variant of VGG. Options are 11, 13, 16, 19.</li>
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
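<p>A minimal sketch; keyword arguments such as <code class="docutils literal"><span class="pre">batch_norm</span></code> are forwarded to the <code class="docutils literal"><span class="pre">VGG</span></code> constructor, which is what the <code class="docutils literal"><span class="pre">vgg*_bn</span></code> factories below rely on:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>from mxnet.gluon.model_zoo.vision import get_vgg

# 16-layer VGG with batch normalization, built via the generic entry point.
net = get_vgg(16, batch_norm=True)
net.initialize()
</pre></div>
</div>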
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.inception_v3">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">inception_v3</code><span class="sig-paren">(</span><em>pretrained=False</em>, <em>ctx=cpu(0)</em>, <em>root='~/.mxnet/models'</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/inception.html#inception_v3"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.inception_v3" title="Permalink to this definition"></a></dt>
<dd><p>Inception v3 model from
<a class="reference external" href="http://arxiv.org/abs/1512.00567">“Rethinking the Inception Architecture for Computer Vision”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
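<p>Note that Inception v3 is usually evaluated on 299x299 crops rather than the 224x224 used for the other models on this page; the sketch below assumes that input size and uses a synthetic batch for illustration:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>import mxnet as mx
from mxnet.gluon.model_zoo import vision

net = vision.inception_v3(pretrained=True)

# Synthetic 299x299 batch standing in for a preprocessed image.
x = mx.nd.random.uniform(shape=(1, 3, 299, 299))
prob = mx.nd.softmax(net(x))   # shape (1, 1000)
</pre></div>
</div>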
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.mobilenet0_25">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">mobilenet0_25</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/mobilenet.html#mobilenet0_25"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.mobilenet0_25" title="Permalink to this definition"></a></dt>
<dd><p>MobileNet model from the
<a class="reference external" href="https://arxiv.org/abs/1704.04861">“MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications”</a> paper, with width multiplier 0.25.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.mobilenet0_5">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">mobilenet0_5</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/mobilenet.html#mobilenet0_5"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.mobilenet0_5" title="Permalink to this definition"></a></dt>
<dd><p>MobileNet model from the
<a class="reference external" href="https://arxiv.org/abs/1704.04861">“MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications”</a> paper, with width multiplier 0.5.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.mobilenet0_75">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">mobilenet0_75</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/mobilenet.html#mobilenet0_75"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.mobilenet0_75" title="Permalink to this definition"></a></dt>
<dd><p>MobileNet model from the
<a class="reference external" href="https://arxiv.org/abs/1704.04861">“MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications”</a> paper, with width multiplier 0.75.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.mobilenet1_0">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">mobilenet1_0</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/mobilenet.html#mobilenet1_0"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.mobilenet1_0" title="Permalink to this definition"></a></dt>
<dd><p>MobileNet model from the
<a class="reference external" href="https://arxiv.org/abs/1704.04861">“MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications”</a> paper, with width multiplier 1.0.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.resnet101_v1">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">resnet101_v1</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/resnet.html#resnet101_v1"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.resnet101_v1" title="Permalink to this definition"></a></dt>
<dd><p>ResNet-101 V1 model from <a class="reference external" href="http://arxiv.org/abs/1512.03385">“Deep Residual Learning for Image Recognition”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.resnet101_v2">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">resnet101_v2</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/resnet.html#resnet101_v2"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.resnet101_v2" title="Permalink to this definition"></a></dt>
<dd><p>ResNet-101 V2 model from <a class="reference external" href="https://arxiv.org/abs/1603.05027">“Identity Mappings in Deep Residual Networks”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.resnet152_v1">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">resnet152_v1</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/resnet.html#resnet152_v1"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.resnet152_v1" title="Permalink to this definition"></a></dt>
<dd><p>ResNet-152 V1 model from <a class="reference external" href="http://arxiv.org/abs/1512.03385">“Deep Residual Learning for Image Recognition”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.resnet152_v2">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">resnet152_v2</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/resnet.html#resnet152_v2"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.resnet152_v2" title="Permalink to this definition"></a></dt>
<dd><p>ResNet-152 V2 model from <a class="reference external" href="https://arxiv.org/abs/1603.05027">“Identity Mappings in Deep Residual Networks”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.resnet18_v1">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">resnet18_v1</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/resnet.html#resnet18_v1"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.resnet18_v1" title="Permalink to this definition"></a></dt>
<dd><p>ResNet-18 V1 model from <a class="reference external" href="http://arxiv.org/abs/1512.03385">“Deep Residual Learning for Image Recognition”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.resnet18_v2">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">resnet18_v2</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/resnet.html#resnet18_v2"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.resnet18_v2" title="Permalink to this definition"></a></dt>
<dd><p>ResNet-18 V2 model from <a class="reference external" href="https://arxiv.org/abs/1603.05027">“Identity Mappings in Deep Residual Networks”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.resnet34_v1">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">resnet34_v1</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/resnet.html#resnet34_v1"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.resnet34_v1" title="Permalink to this definition"></a></dt>
<dd><p>ResNet-34 V1 model from <a class="reference external" href="http://arxiv.org/abs/1512.03385">“Deep Residual Learning for Image Recognition”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.resnet34_v2">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">resnet34_v2</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/resnet.html#resnet34_v2"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.resnet34_v2" title="Permalink to this definition"></a></dt>
<dd><p>ResNet-34 V2 model from <a class="reference external" href="https://arxiv.org/abs/1603.05027">“Identity Mappings in Deep Residual Networks”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.resnet50_v1">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">resnet50_v1</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/resnet.html#resnet50_v1"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.resnet50_v1" title="Permalink to this definition"></a></dt>
<dd><p>ResNet-50 V1 model from <a class="reference external" href="http://arxiv.org/abs/1512.03385">“Deep Residual Learning for Image Recognition”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.resnet50_v2">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">resnet50_v2</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/resnet.html#resnet50_v2"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.resnet50_v2" title="Permalink to this definition"></a></dt>
<dd><p>ResNet-50 V2 model from <a class="reference external" href="https://arxiv.org/abs/1603.05027">“Identity Mappings in Deep Residual Networks”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.squeezenet1_0">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">squeezenet1_0</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/squeezenet.html#squeezenet1_0"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.squeezenet1_0" title="Permalink to this definition"></a></dt>
<dd><p>SqueezeNet 1.0 model from the <a class="reference external" href="https://arxiv.org/abs/1602.07360">“SqueezeNet: AlexNet-level accuracy with 50x fewer parameters
and &lt;0.5MB model size”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load the pretrained weights for model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.squeezenet1_1">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">squeezenet1_1</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/squeezenet.html#squeezenet1_1"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.squeezenet1_1" title="Permalink to this definition"></a></dt>
<dd><p>SqueezeNet 1.1 model from the <a class="reference external" href="https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1">official SqueezeNet repo</a>.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load pretrained weights for the model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
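<p>A hedged sketch of the <code class="docutils literal"><span class="pre">ctx</span></code> argument (an editorial example; it assumes a CUDA-enabled MXNet build with at least one visible GPU): pretrained weights can be loaded directly onto a GPU context.</p>
<div class="highlight-python"><div class="highlight"><pre># Assumes mx.gpu(0) is available; use mx.cpu() instead if it is not.
import mxnet as mx
from mxnet.gluon.model_zoo import vision

ctx = mx.gpu(0)
net = vision.squeezenet1_1(pretrained=True, ctx=ctx)
x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)   # input must live on the same context
print(net(x).shape)                          # expected: (1, 1000)
</pre></div></div>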
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.vgg11">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">vgg11</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/vgg.html#vgg11"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.vgg11" title="Permalink to this definition"></a></dt>
<dd><p>VGG-11 model from the <a class="reference external" href="https://arxiv.org/abs/1409.1556">“Very Deep Convolutional Networks for Large-Scale Image Recognition”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load pretrained weights for the model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
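<p>A short sketch of the <code class="docutils literal"><span class="pre">root</span></code> argument (added for illustration; the cache directory shown is a made-up example path): downloaded weights are cached under <code class="docutils literal"><span class="pre">root</span></code> instead of the default <code class="docutils literal"><span class="pre">~/.mxnet/models</span></code>.</p>
<div class="highlight-python"><div class="highlight"><pre># '/tmp/mxnet_models' is a hypothetical cache location chosen for illustration.
from mxnet.gluon.model_zoo import vision

net = vision.vgg11(pretrained=True, root='/tmp/mxnet_models')
print(net)   # prints the layer-by-layer structure of the network
</pre></div></div>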
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.vgg11_bn">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">vgg11_bn</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/vgg.html#vgg11_bn"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.vgg11_bn" title="Permalink to this definition"></a></dt>
<dd><p>VGG-11 model with batch normalization from the
<a class="reference external" href="https://arxiv.org/abs/1409.1556">“Very Deep Convolutional Networks for Large-Scale Image Recognition”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load pretrained weights for the model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.vgg13">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">vgg13</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/vgg.html#vgg13"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.vgg13" title="Permalink to this definition"></a></dt>
<dd><p>VGG-13 model from the <a class="reference external" href="https://arxiv.org/abs/1409.1556">“Very Deep Convolutional Networks for Large-Scale Image Recognition”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load pretrained weights for the model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
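<p>A sketch of reading out a prediction (illustrative, not from the original reference; real inputs would need the standard ImageNet resizing and normalization rather than zeros): the row-wise argmax of the output gives the top-1 ImageNet class index.</p>
<div class="highlight-python"><div class="highlight"><pre># Dummy input; with real images, resize to 224x224 and normalize first.
import mxnet as mx
from mxnet.gluon.model_zoo import vision

net = vision.vgg13(pretrained=True)
out = net(mx.nd.zeros((1, 3, 224, 224)))
top1 = int(out.argmax(axis=1).asscalar())   # index of the highest-scoring class
print(top1)
</pre></div></div>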
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.vgg13_bn">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">vgg13_bn</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/vgg.html#vgg13_bn"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.vgg13_bn" title="Permalink to this definition"></a></dt>
<dd><p>VGG-13 model with batch normalization from the
<a class="reference external" href="https://arxiv.org/abs/1409.1556">“Very Deep Convolutional Networks for Large-Scale Image Recognition”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load pretrained weights for the model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.vgg16">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">vgg16</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/vgg.html#vgg16"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.vgg16" title="Permalink to this definition"></a></dt>
<dd><p>VGG-16 model from the <a class="reference external" href="https://arxiv.org/abs/1409.1556">“Very Deep Convolutional Networks for Large-Scale Image Recognition”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load pretrained weights for the model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.vgg16_bn">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">vgg16_bn</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/vgg.html#vgg16_bn"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.vgg16_bn" title="Permalink to this definition"></a></dt>
<dd><p>VGG-16 model with batch normalization from the
<a class="reference external" href="https://arxiv.org/abs/1409.1556">“Very Deep Convolutional Networks for Large-Scale Image Recognition”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load pretrained weights for the model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
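<p>A hedged sketch of feature extraction (an editorial addition; it assumes the model zoo VGG networks expose a <code class="docutils literal"><span class="pre">features</span></code> sub-block separate from the classifier <code class="docutils literal"><span class="pre">output</span></code> block, which should be verified against the installed version):</p>
<div class="highlight-python"><div class="highlight"><pre># Using the pre-classifier trunk of VGG-16 (BN) as a fixed feature extractor.
import mxnet as mx
from mxnet.gluon.model_zoo import vision

net = vision.vgg16_bn(pretrained=True)
feats = net.features(mx.nd.zeros((1, 3, 224, 224)))   # activations before the final classifier
print(feats.shape)
</pre></div></div>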
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.vgg19">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">vgg19</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/vgg.html#vgg19"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.vgg19" title="Permalink to this definition"></a></dt>
<dd><p>VGG-19 model from the <a class="reference external" href="https://arxiv.org/abs/1409.1556">“Very Deep Convolutional Networks for Large-Scale Image Recognition”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load pretrained weights for the model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="mxnet.gluon.model_zoo.vision.vgg19_bn">
<code class="descclassname">mxnet.gluon.model_zoo.vision.</code><code class="descname">vgg19_bn</code><span class="sig-paren">(</span><em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/model_zoo/vision/vgg.html#vgg19_bn"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.model_zoo.vision.vgg19_bn" title="Permalink to this definition"></a></dt>
<dd><p>VGG-19 model with batch normalization from the
<a class="reference external" href="https://arxiv.org/abs/1409.1556">“Very Deep Convolutional Networks for Large-Scale Image Recognition”</a> paper.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>pretrained</strong> (<em>bool, default False</em>) – Whether to load pretrained weights for the model.</li>
<li><strong>ctx</strong> (<em>Context, default CPU</em>) – The context in which to load the pretrained weights.</li>
<li><strong>root</strong> (<em>str, default '~/.mxnet/models'</em>) – Location for keeping the model parameters.</li>
</ul>
</td>
</tr>
</tbody>
</table>
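<p>Finally, a sketch of retrieving any of the models above by name with <code class="docutils literal"><span class="pre">get_model</span></code> (an editorial example; keyword arguments are forwarded to the constructors documented here):</p>
<div class="highlight-python"><div class="highlight"><pre># Selecting a model zoo network by its string name.
from mxnet.gluon.model_zoo import vision

net = vision.get_model('vgg19_bn', pretrained=True)
print(type(net).__name__)   # the underlying Gluon block class
</pre></div></div>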
</dd></dl>
<script>auto_index("api-reference");</script></div>
</div>
</div>
</div>
<div aria-label="main navigation" class="sphinxsidebar rightsidebar" role="navigation">
<div class="sphinxsidebarwrapper">
<h3><a href="../../../index.html">Table Of Contents</a></h3>
<ul>
<li><a class="reference internal" href="#">Gluon Model Zoo</a><ul>
<li><a class="reference internal" href="#overview">Overview</a><ul>
<li><a class="reference internal" href="#module-mxnet.gluon.model_zoo.vision">Vision</a><ul>
<li><a class="reference internal" href="#resnet">ResNet</a></li>
<li><a class="reference internal" href="#vgg">VGG</a></li>
<li><a class="reference internal" href="#alexnet">Alexnet</a></li>
<li><a class="reference internal" href="#densenet">DenseNet</a></li>
<li><a class="reference internal" href="#squeezenet">SqueezeNet</a></li>
<li><a class="reference internal" href="#inception">Inception</a></li>
<li><a class="reference internal" href="#mobilenet">MobileNet</a></li>
</ul>
</li>
</ul>
</li>
<li><a class="reference internal" href="#api-reference">API Reference</a></li>
</ul>
</li>
</ul>
</div>
</div>
</div><div class="footer">
<div class="section-disclaimer">
<div class="container">
<div>
<img height="60" src="https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/image/apache_incubator_logo.png"/>
<p>
Apache MXNet is an effort undergoing incubation at The Apache Software Foundation (ASF), <strong>sponsored by the <i>Apache Incubator</i></strong>. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.
</p>
<p>
"Copyright © 2017-2018, The Apache Software Foundation
Apache MXNet, MXNet, Apache, the Apache feather, and the Apache MXNet project logo are either registered trademarks or trademarks of the Apache Software Foundation."
</p>
</div>
</div>
</div>
</div> <!-- pagename != index -->
</div>
<script crossorigin="anonymous" integrity="sha384-0mSbJDEHialfmuBBQP6A4Qrprq5OVfW37PRR3j5ELqxss1yVqOtnepnHVP9aJ7xS" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/js/bootstrap.min.js"></script>
<script src="../../../_static/js/sidebar.js" type="text/javascript"></script>
<script src="../../../_static/js/search.js" type="text/javascript"></script>
<script src="../../../_static/js/navbar.js" type="text/javascript"></script>
<script src="../../../_static/js/clipboard.min.js" type="text/javascript"></script>
<script src="../../../_static/js/copycode.js" type="text/javascript"></script>
<script src="../../../_static/js/page.js" type="text/javascript"></script>
<script type="text/javascript">
$('body').ready(function () {
$('body').css('visibility', 'visible');
});
</script>
</body>
</html>