<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8"/>
<meta content="IE=edge" http-equiv="X-UA-Compatible"/>
<meta content="width=device-width, initial-scale=1" name="viewport"/>
<title>Gluon Loss API — mxnet documentation</title>
<link crossorigin="anonymous" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css" integrity="sha384-1q8mTJOASx8j1Au+a5WDVnPi2lkFfwwEAa8hDDdjZlpLegxhjVME1fgjWPGmkzs7" rel="stylesheet"/>
<link href="https://maxcdn.bootstrapcdn.com/font-awesome/4.5.0/css/font-awesome.min.css" rel="stylesheet"/>
<link href="../../../_static/basic.css" rel="stylesheet" type="text/css"/>
<link href="../../../_static/pygments.css" rel="stylesheet" type="text/css"/>
<link href="../../../_static/mxnet.css" rel="stylesheet" type="text/css">
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '../../../',
VERSION: '',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true,
SOURCELINK_SUFFIX: ''
};
</script>
<script src="https://code.jquery.com/jquery-1.11.1.min.js" type="text/javascript"></script>
<script src="../../../_static/underscore.js" type="text/javascript"></script>
<script src="../../../_static/searchtools_custom.js" type="text/javascript"></script>
<script src="../../../_static/doctools.js" type="text/javascript"></script>
<script src="../../../_static/selectlang.js" type="text/javascript"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js?config=TeX-AMS-MML_HTMLorMML" type="text/javascript"></script>
<script type="text/javascript"> jQuery(function() { Search.loadIndex("/searchindex.js"); Search.init();}); </script>
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new
Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-96378503-1', 'auto');
ga('send', 'pageview');
</script>
<link href="gluon.html" rel="up" title="Gluon Package"/>
<link href="data.html" rel="next" title="Gluon Data API">
<link href="rnn.html" rel="prev" title="Gluon Recurrent Neural Network API">
<link href="https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/image/mxnet-icon.png" rel="icon" type="image/png"/>
</link></link></link></head>
<body background="https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/image/mxnet-background-compressed.jpeg" role="document">
<div class="content-block"><div class="navbar navbar-fixed-top">
<div class="container" id="navContainer">
<div class="innder" id="header-inner">
<h1 id="logo-wrap">
<a href="../../../" id="logo"><img src="https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/image/mxnet_logo.png"/></a>
</h1>
<nav class="nav-bar" id="main-nav">
<a class="main-nav-link" href="../../../install/index.html">Install</a>
<a class="main-nav-link" href="../../../tutorials/index.html">Tutorials</a>
<span id="dropdown-menu-position-anchor">
<a aria-expanded="true" aria-haspopup="true" class="main-nav-link dropdown-toggle" data-toggle="dropdown" href="#" role="button">Gluon <span class="caret"></span></a>
<ul class="dropdown-menu navbar-menu" id="package-dropdown-menu">
<li><a class="main-nav-link" href="../../../gluon/index.html">About</a></li>
<li><a class="main-nav-link" href="http://gluon.mxnet.io">Tutorials</a></li>
</ul>
</span>
<span id="dropdown-menu-position-anchor">
<a aria-expanded="true" aria-haspopup="true" class="main-nav-link dropdown-toggle" data-toggle="dropdown" href="#" role="button">API <span class="caret"></span></a>
<ul class="dropdown-menu navbar-menu" id="package-dropdown-menu">
<li><a class="main-nav-link" href="../../../api/python/index.html">Python</a></li>
<li><a class="main-nav-link" href="../../../api/scala/index.html">Scala</a></li>
<li><a class="main-nav-link" href="../../../api/r/index.html">R</a></li>
<li><a class="main-nav-link" href="../../../api/julia/index.html">Julia</a></li>
<li><a class="main-nav-link" href="../../../api/c++/index.html">C++</a></li>
<li><a class="main-nav-link" href="../../../api/perl/index.html">Perl</a></li>
</ul>
</span>
<span id="dropdown-menu-position-anchor-docs">
<a aria-expanded="true" aria-haspopup="true" class="main-nav-link dropdown-toggle" data-toggle="dropdown" href="#" role="button">Docs <span class="caret"></span></a>
<ul class="dropdown-menu navbar-menu" id="package-dropdown-menu-docs">
<li><a class="main-nav-link" href="../../../faq/index.html">FAQ</a></li>
<li><a class="main-nav-link" href="../../../architecture/index.html">Architecture</a></li>
<li><a class="main-nav-link" href="https://github.com/apache/incubator-mxnet/tree/1.0.0/example">Examples</a></li>
<li><a class="main-nav-link" href="../../../model_zoo/index.html">Model Zoo</a></li>
</ul>
</span>
<a class="main-nav-link" href="https://github.com/dmlc/mxnet">Github</a>
<span id="dropdown-menu-position-anchor-community">
<a aria-expanded="true" aria-haspopup="true" class="main-nav-link dropdown-toggle" data-toggle="dropdown" href="#" role="button">Community <span class="caret"></span></a>
<ul class="dropdown-menu navbar-menu" id="package-dropdown-menu-community">
<li><a class="main-nav-link" href="../../../community/index.html">Community</a></li>
<li><a class="main-nav-link" href="../../../community/contribute.html">Contribute</a></li>
<li><a class="main-nav-link" href="../../../community/powered_by.html">Powered By</a></li>
</ul>
</span>
<a class="main-nav-link" href="http://discuss.mxnet.io">Discuss</a>
<span id="dropdown-menu-position-anchor-version" style="position: relative"><a href="#" class="main-nav-link dropdown-toggle" data-toggle="dropdown" role="button" aria-haspopup="true" aria-expanded="true">Versions(1.0.0)<span class="caret"></span></a><ul id="package-dropdown-menu" class="dropdown-menu"><li><a class="main-nav-link" href=https://mxnet.incubator.apache.org/>1.1.0</a></li><li><a class="main-nav-link" href=https://mxnet.incubator.apache.org/versions/1.0.0/index.html>1.0.0</a></li><li><a class="main-nav-link" href=https://mxnet.incubator.apache.org/versions/0.12.1/index.html>0.12.1</a></li><li><a class="main-nav-link" href=https://mxnet.incubator.apache.org/versions/0.12.0/index.html>0.12.0</a></li><li><a class="main-nav-link" href=https://mxnet.incubator.apache.org/versions/0.11.0/index.html>0.11.0</a></li><li><a class="main-nav-link" href=https://mxnet.incubator.apache.org/versions/master/index.html>master</a></li></ul></span></nav>
<script> function getRootPath(){ return "../../../" } </script>
<div class="burgerIcon dropdown">
<a class="dropdown-toggle" data-toggle="dropdown" href="#" role="button"></a>
<ul class="dropdown-menu" id="burgerMenu">
<li><a href="../../../install/index.html">Install</a></li>
<li><a class="main-nav-link" href="../../../tutorials/index.html">Tutorials</a></li>
<li class="dropdown-submenu">
<a href="#" tabindex="-1">Community</a>
<ul class="dropdown-menu">
<li><a href="../../../community/index.html" tabindex="-1">Community</a></li>
<li><a href="../../../community/contribute.html" tabindex="-1">Contribute</a></li>
<li><a href="../../../community/powered_by.html" tabindex="-1">Powered By</a></li>
</ul>
</li>
<li class="dropdown-submenu">
<a href="#" tabindex="-1">API</a>
<ul class="dropdown-menu">
<li><a href="../../../api/python/index.html" tabindex="-1">Python</a>
</li>
<li><a href="../../../api/scala/index.html" tabindex="-1">Scala</a>
</li>
<li><a href="../../../api/r/index.html" tabindex="-1">R</a>
</li>
<li><a href="../../../api/julia/index.html" tabindex="-1">Julia</a>
</li>
<li><a href="../../../api/c++/index.html" tabindex="-1">C++</a>
</li>
<li><a href="../../../api/perl/index.html" tabindex="-1">Perl</a>
</li>
</ul>
</li>
<li class="dropdown-submenu">
<a href="#" tabindex="-1">Docs</a>
<ul class="dropdown-menu">
<li><a href="../../../tutorials/index.html" tabindex="-1">Tutorials</a></li>
<li><a href="../../../faq/index.html" tabindex="-1">FAQ</a></li>
<li><a href="../../../architecture/index.html" tabindex="-1">Architecture</a></li>
<li><a href="https://github.com/apache/incubator-mxnet/tree/1.0.0/example" tabindex="-1">Examples</a></li>
<li><a href="../../../model_zoo/index.html" tabindex="-1">Model Zoo</a></li>
</ul>
</li>
<li><a href="../../../architecture/index.html">Architecture</a></li>
<li><a class="main-nav-link" href="https://github.com/dmlc/mxnet">Github</a></li>
<li id="dropdown-menu-position-anchor-version-mobile" class="dropdown-submenu" style="position: relative"><a href="#" tabindex="-1">Versions(1.0.0)</a><ul class="dropdown-menu"><li><a tabindex="-1" href=https://mxnet.incubator.apache.org/>1.1.0</a></li><li><a tabindex="-1" href=https://mxnet.incubator.apache.org/versions/1.0.0/index.html>1.0.0</a></li><li><a tabindex="-1" href=https://mxnet.incubator.apache.org/versions/0.12.1/index.html>0.12.1</a></li><li><a tabindex="-1" href=https://mxnet.incubator.apache.org/versions/0.12.0/index.html>0.12.0</a></li><li><a tabindex="-1" href=https://mxnet.incubator.apache.org/versions/0.11.0/index.html>0.11.0</a></li><li><a tabindex="-1" href=https://mxnet.incubator.apache.org/versions/master/index.html>master</a></li></ul></li></ul>
</div>
<div class="plusIcon dropdown">
<a class="dropdown-toggle" data-toggle="dropdown" href="#" role="button"><span aria-hidden="true" class="glyphicon glyphicon-plus"></span></a>
<ul class="dropdown-menu dropdown-menu-right" id="plusMenu"></ul>
</div>
<div id="search-input-wrap">
<form action="../../../search.html" autocomplete="off" class="" method="get" role="search">
<div class="form-group inner-addon left-addon">
<i class="glyphicon glyphicon-search"></i>
<input class="form-control" name="q" placeholder="Search" type="text"/>
</div>
<input name="check_keywords" type="hidden" value="yes"/>
<input name="area" type="hidden" value="default">
</input></form>
<div id="search-preview"></div>
</div>
<div id="searchIcon">
<span aria-hidden="true" class="glyphicon glyphicon-search"></span>
</div>
<!-- <div id="lang-select-wrap"> -->
<!-- <label id="lang-select-label"> -->
<!-- <\!-- <i class="fa fa-globe"></i> -\-> -->
<!-- <span></span> -->
<!-- </label> -->
<!-- <select id="lang-select"> -->
<!-- <option value="en">Eng</option> -->
<!-- <option value="zh">中文</option> -->
<!-- </select> -->
<!-- </div> -->
<!-- <a id="mobile-nav-toggle">
<span class="mobile-nav-toggle-bar"></span>
<span class="mobile-nav-toggle-bar"></span>
<span class="mobile-nav-toggle-bar"></span>
</a> -->
</div>
</div>
</div>
<script type="text/javascript">
$('body').css('background', 'white');
</script>
<div class="container">
<div class="row">
<div aria-label="main navigation" class="sphinxsidebar leftsidebar" role="navigation">
<div class="sphinxsidebarwrapper">
<ul class="current">
<li class="toctree-l1 current"><a class="reference internal" href="../index.html">Python Documents</a><ul class="current">
<li class="toctree-l2"><a class="reference internal" href="../index.html#ndarray-api">NDArray API</a></li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#symbol-api">Symbol API</a></li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#module-api">Module API</a></li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#autograd-api">Autograd API</a></li>
<li class="toctree-l2 current"><a class="reference internal" href="../index.html#gluon-api">Gluon API</a><ul class="current">
<li class="toctree-l3 current"><a class="reference internal" href="gluon.html">Gluon Package</a><ul class="current">
<li class="toctree-l4 current"><a class="reference internal" href="gluon.html#overview">Overview</a></li>
<li class="toctree-l4"><a class="reference internal" href="gluon.html#parameter">Parameter</a></li>
<li class="toctree-l4"><a class="reference internal" href="gluon.html#containers">Containers</a></li>
<li class="toctree-l4"><a class="reference internal" href="gluon.html#trainer">Trainer</a></li>
<li class="toctree-l4"><a class="reference internal" href="gluon.html#utilities">Utilities</a></li>
<li class="toctree-l4"><a class="reference internal" href="gluon.html#api-reference">API Reference</a></li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="nn.html">Gluon Neural Network Layers</a></li>
<li class="toctree-l3"><a class="reference internal" href="rnn.html">Gluon Recurrent Neural Network API</a></li>
<li class="toctree-l3 current"><a class="current reference internal" href="">Gluon Loss API</a><ul>
<li class="toctree-l4"><a class="reference internal" href="#overview">Overview</a></li>
<li class="toctree-l4"><a class="reference internal" href="#api-reference">API Reference</a></li>
</ul>
</li>
<li class="toctree-l3"><a class="reference internal" href="data.html">Gluon Data API</a></li>
<li class="toctree-l3"><a class="reference internal" href="model_zoo.html">Gluon Model Zoo</a></li>
<li class="toctree-l3"><a class="reference internal" href="contrib.html">Gluon Contrib API</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#kvstore-api">KVStore API</a></li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#io-api">IO API</a></li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#image-api">Image API</a></li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#optimization-api">Optimization API</a></li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#callback-api">Callback API</a></li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#metric-api">Metric API</a></li>
<li class="toctree-l2"><a class="reference internal" href="../index.html#run-time-compilation-api">Run-Time Compilation API</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="../../r/index.html">R Documents</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../julia/index.html">Julia Documents</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../c++/index.html">C++ Documents</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../scala/index.html">Scala Documents</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../perl/index.html">Perl Documents</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../faq/index.html">HowTo Documents</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../architecture/index.html">System Documents</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../tutorials/index.html">Tutorials</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../community/index.html">Community</a></li>
</ul>
</div>
</div>
<div class="content">
<div class="page-tracker"></div>
<div class="section" id="gluon-loss-api">
<span id="gluon-loss-api"></span><h1>Gluon Loss API<a class="headerlink" href="#gluon-loss-api" title="Permalink to this headline"></a></h1>
<div class="section" id="overview">
<span id="overview"></span><h2>Overview<a class="headerlink" href="#overview" title="Permalink to this headline"></a></h2>
<p>This document lists the loss API in Gluon:</p>
<p>This package includes several commonly used loss functions in neural networks.</p>
<table border="1" class="longtable docutils">
<colgroup>
<col width="10%"/>
<col width="90%"/>
</colgroup>
<tbody valign="top">
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.loss.L2Loss" title="mxnet.gluon.loss.L2Loss"><code class="xref py py-obj docutils literal"><span class="pre">L2Loss</span></code></a></td>
<td>Calculates the mean squared error between <cite>pred</cite> and <cite>label</cite>.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.loss.L1Loss" title="mxnet.gluon.loss.L1Loss"><code class="xref py py-obj docutils literal"><span class="pre">L1Loss</span></code></a></td>
<td>Calculates the mean absolute error between <cite>pred</cite> and <cite>label</cite>.</td>
</tr>
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.loss.SigmoidBinaryCrossEntropyLoss" title="mxnet.gluon.loss.SigmoidBinaryCrossEntropyLoss"><code class="xref py py-obj docutils literal"><span class="pre">SigmoidBinaryCrossEntropyLoss</span></code></a></td>
<td>The cross-entropy loss for binary classification.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.loss.SoftmaxCrossEntropyLoss" title="mxnet.gluon.loss.SoftmaxCrossEntropyLoss"><code class="xref py py-obj docutils literal"><span class="pre">SoftmaxCrossEntropyLoss</span></code></a></td>
<td>Computes the softmax cross entropy loss.</td>
</tr>
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.loss.SigmoidBinaryCrossEntropyLoss" title="mxnet.gluon.loss.SigmoidBinaryCrossEntropyLoss"><code class="xref py py-obj docutils literal"><span class="pre">SigmoidBinaryCrossEntropyLoss</span></code></a></td>
<td>The cross-entropy loss for binary classification.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.loss.KLDivLoss" title="mxnet.gluon.loss.KLDivLoss"><code class="xref py py-obj docutils literal"><span class="pre">KLDivLoss</span></code></a></td>
<td>The Kullback-Leibler divergence loss.</td>
</tr>
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.loss.HuberLoss" title="mxnet.gluon.loss.HuberLoss"><code class="xref py py-obj docutils literal"><span class="pre">HuberLoss</span></code></a></td>
<td>Calculates smoothed L1 loss that is equal to L1 loss if absolute error exceeds rho but is equal to L2 loss otherwise.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.loss.HingeLoss" title="mxnet.gluon.loss.HingeLoss"><code class="xref py py-obj docutils literal"><span class="pre">HingeLoss</span></code></a></td>
<td>Calculates the hinge loss function often used in SVMs:</td>
</tr>
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.loss.SquaredHingeLoss" title="mxnet.gluon.loss.SquaredHingeLoss"><code class="xref py py-obj docutils literal"><span class="pre">SquaredHingeLoss</span></code></a></td>
<td>Calculates the soft-margin loss function used in SVMs:</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.loss.LogisticLoss" title="mxnet.gluon.loss.LogisticLoss"><code class="xref py py-obj docutils literal"><span class="pre">LogisticLoss</span></code></a></td>
<td>Calculates the logistic loss (for binary classification only):</td>
</tr>
<tr class="row-odd"><td><a class="reference internal" href="#mxnet.gluon.loss.TripletLoss" title="mxnet.gluon.loss.TripletLoss"><code class="xref py py-obj docutils literal"><span class="pre">TripletLoss</span></code></a></td>
<td>Calculates triplet loss given three input tensors and a positive margin.</td>
</tr>
<tr class="row-even"><td><a class="reference internal" href="#mxnet.gluon.loss.CTCLoss" title="mxnet.gluon.loss.CTCLoss"><code class="xref py py-obj docutils literal"><span class="pre">CTCLoss</span></code></a></td>
<td>Connectionist Temporal Classification Loss.</td>
</tr>
</tbody>
</table>
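<p>Losses in Gluon are callable blocks: construct one, call it on <cite>pred</cite> and <cite>label</cite> NDArrays, and it returns one loss value per example along <cite>batch_axis</cite>. The snippet below is a minimal sketch of a typical training step; the single <cite>Dense</cite> layer, the random toy data, and the SGD hyper-parameters are chosen only for illustration.</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>from mxnet import autograd, gluon, nd

net = gluon.nn.Dense(1)                      # toy model, for illustration only
net.initialize()
loss_fn = gluon.loss.L2Loss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})

x = nd.random.uniform(shape=(8, 4))          # batch of 8 toy samples
y = nd.random.uniform(shape=(8, 1))          # matching toy targets
with autograd.record():                      # record the forward pass
    loss = loss_fn(net(x), y)                # shape (8,): one value per sample
loss.backward()                              # backpropagate through the loss
trainer.step(batch_size=8)                   # update the parameters
print(loss.mean().asscalar())
</pre></div>
</div>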
</div>
<div class="section" id="api-reference">
<span id="api-reference"></span><h2>API Reference<a class="headerlink" href="#api-reference" title="Permalink to this headline"></a></h2>
<script src="../../_static/js/auto_module_index.js" type="text/javascript"></script><span class="target" id="module-mxnet.gluon.loss"></span><p>losses for training neural networks</p>
<dl class="class">
<dt id="mxnet.gluon.loss.Loss">
<em class="property">class </em><code class="descclassname">mxnet.gluon.loss.</code><code class="descname">Loss</code><span class="sig-paren">(</span><em>weight</em>, <em>batch_axis</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/loss.html#Loss"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.loss.Loss" title="Permalink to this definition"></a></dt>
<dd><p>Base class for loss.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>weight</strong> (<em>float or None</em>) – Global scalar weight for loss.</li>
<li><strong>batch_axis</strong> (<em>int, default 0</em>) – The axis that represents mini-batch.</li>
</ul>
</td>
</tr>
</tbody>
</table>
<dl class="method">
<dt id="mxnet.gluon.loss.Loss.hybrid_forward">
<code class="descname">hybrid_forward</code><span class="sig-paren">(</span><em>F</em>, <em>x</em>, <em>*args</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/loss.html#Loss.hybrid_forward"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.loss.Loss.hybrid_forward" title="Permalink to this definition"></a></dt>
<dd><p>Override this method to construct the symbolic graph for this <cite>Block</cite>.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>x</strong> (<em>Symbol or NDArray</em>) – The first input tensor.</li>
<li><strong>*args</strong><p>Additional input tensors.</p>
</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
</dd></dl>
<dl class="class">
<dt id="mxnet.gluon.loss.L2Loss">
<em class="property">class </em><code class="descclassname">mxnet.gluon.loss.</code><code class="descname">L2Loss</code><span class="sig-paren">(</span><em>weight=1.0</em>, <em>batch_axis=0</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/loss.html#L2Loss"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.loss.L2Loss" title="Permalink to this definition"></a></dt>
<dd><p>Calculates the mean squared error between <cite>pred</cite> and <cite>label</cite>.</p>
<div class="math">
\[L = \frac{1}{2} \sum_i \vert {pred}_i - {label}_i \vert^2.\]</div>
<p><cite>pred</cite> and <cite>label</cite> can have arbitrary shape as long as they have the same
number of elements.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>weight</strong> (<em>float or None</em>) – Global scalar weight for loss.</li>
<li><strong>batch_axis</strong> (<em>int, default 0</em>) – The axis that represents mini-batch.</li>
</ul>
</td>
</tr>
</tbody>
</table>
<dl class="docutils">
<dt>Inputs:</dt>
<dd><ul class="first last simple">
<li><strong>pred</strong>: prediction tensor with arbitrary shape</li>
<li><strong>label</strong>: target tensor with the same size as pred.</li>
<li><strong>sample_weight</strong>: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).</li>
</ul>
</dd>
<dt>Outputs:</dt>
<dd><ul class="first last simple">
<li><strong>loss</strong>: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.</li>
</ul>
</dd>
</dl>
</dd></dl>
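<p><strong>Example</strong> (a minimal sketch with made-up values): <cite>L2Loss</cite> can be called directly on NDArrays of matching shape:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>from mxnet import nd
from mxnet.gluon import loss

l2 = loss.L2Loss()
pred = nd.array([[1.0, 2.0], [3.0, 4.0]])    # made-up predictions
label = nd.array([[1.0, 1.0], [3.0, 6.0]])   # made-up targets
# 0.5 * squared error, averaged over the feature axis: [0.25, 1.0]
print(l2(pred, label))
</pre></div>
</div>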
<dl class="class">
<dt id="mxnet.gluon.loss.L1Loss">
<em class="property">class </em><code class="descclassname">mxnet.gluon.loss.</code><code class="descname">L1Loss</code><span class="sig-paren">(</span><em>weight=None</em>, <em>batch_axis=0</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/loss.html#L1Loss"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.loss.L1Loss" title="Permalink to this definition"></a></dt>
<dd><p>Calculates the mean absolute error between <cite>pred</cite> and <cite>label</cite>.</p>
<div class="math">
\[L = \sum_i \vert {pred}_i - {label}_i \vert.\]</div>
<p><cite>pred</cite> and <cite>label</cite> can have arbitrary shape as long as they have the same
number of elements.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>weight</strong> (<em>float or None</em>) – Global scalar weight for loss.</li>
<li><strong>batch_axis</strong> (<em>int, default 0</em>) – The axis that represents mini-batch.</li>
</ul>
</td>
</tr>
</tbody>
</table>
<dl class="docutils">
<dt>Inputs:</dt>
<dd><ul class="first last simple">
<li><strong>pred</strong>: prediction tensor with arbitrary shape</li>
<li><strong>label</strong>: target tensor with the same size as pred.</li>
<li><strong>sample_weight</strong>: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).</li>
</ul>
</dd>
<dt>Outputs:</dt>
<dd><ul class="first last simple">
<li><strong>loss</strong>: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.</li>
</ul>
</dd>
</dl>
</dd></dl>
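<p><strong>Example</strong>: the optional <cite>sample_weight</cite> argument works the same way for every loss. As a sketch with made-up values, here it zeroes out the second sample of an <cite>L1Loss</cite> batch:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>from mxnet import nd
from mxnet.gluon import loss

l1 = loss.L1Loss()
pred = nd.array([[1.0, 2.0], [3.0, 4.0]])
label = nd.array([[0.0, 2.0], [3.0, 0.0]])
weight = nd.array([[1.0], [0.0]])            # per-sample weights, broadcast over features
# per-sample mean absolute error, weighted: [0.5, 0.0]
print(l1(pred, label, weight))
</pre></div>
</div>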
<dl class="class">
<dt id="mxnet.gluon.loss.SigmoidBinaryCrossEntropyLoss">
<em class="property">class </em><code class="descclassname">mxnet.gluon.loss.</code><code class="descname">SigmoidBinaryCrossEntropyLoss</code><span class="sig-paren">(</span><em>from_sigmoid=False</em>, <em>weight=None</em>, <em>batch_axis=0</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/loss.html#SigmoidBinaryCrossEntropyLoss"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.loss.SigmoidBinaryCrossEntropyLoss" title="Permalink to this definition"></a></dt>
<dd><p>The cross-entropy loss for binary classification. (alias: SigmoidBCELoss)</p>
<p>BCE loss is useful when training logistic regression. If <cite>from_sigmoid</cite>
is False (default), this loss computes:</p>
<div class="math">
\[prob = \frac{1}{1 + \exp(-{pred})}\]\[L = - \sum_i {label}_i * \log({prob}_i) +
(1 - {label}_i) * \log(1 - {prob}_i)\]</div>
<p>If <cite>from_sigmoid</cite> is True, this loss computes:</p>
<div class="math">
\[L = - \sum_i {label}_i * \log({pred}_i) +
(1 - {label}_i) * \log(1 - {pred}_i)\]</div>
<p><cite>pred</cite> and <cite>label</cite> can have arbitrary shape as long as they have the same
number of elements.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>from_sigmoid</strong> (<em>bool, default False</em>) – Whether the input is from the output of sigmoid. Setting this to False makes
the loss compute the sigmoid and the BCE together, which is more numerically
stable through the log-sum-exp trick.</li>
<li><strong>weight</strong> (<em>float or None</em>) – Global scalar weight for loss.</li>
<li><strong>batch_axis</strong> (<em>int, default 0</em>) – The axis that represents mini-batch.</li>
</ul>
</td>
</tr>
</tbody>
</table>
<dl class="docutils">
<dt>Inputs:</dt>
<dd><ul class="first last simple">
<li><strong>pred</strong>: prediction tensor with arbitrary shape</li>
<li><strong>label</strong>: target tensor with values in range <cite>[0, 1]</cite>. Must have the
same size as <cite>pred</cite>.</li>
<li><strong>sample_weight</strong>: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).</li>
</ul>
</dd>
<dt>Outputs:</dt>
<dd><ul class="first last simple">
<li><strong>loss</strong>: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.</li>
</ul>
</dd>
</dl>
</dd></dl>
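<p><strong>Example</strong> (a short sketch with made-up logits): by default the loss expects raw, pre-sigmoid scores and targets in <cite>[0, 1]</cite>:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>from mxnet import nd
from mxnet.gluon import loss

bce = loss.SigmoidBinaryCrossEntropyLoss()   # from_sigmoid=False: raw logits
logits = nd.array([[2.0, -1.0, 0.5]])        # made-up scores
label = nd.array([[1.0, 0.0, 1.0]])          # binary targets
print(bce(logits, label))                    # one averaged value per sample
</pre></div>
</div>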
<dl class="attribute">
<dt id="mxnet.gluon.loss.SigmoidBCELoss">
<code class="descclassname">mxnet.gluon.loss.</code><code class="descname">SigmoidBCELoss</code><a class="headerlink" href="#mxnet.gluon.loss.SigmoidBCELoss" title="Permalink to this definition"></a></dt>
<dd><p>alias of <a class="reference internal" href="#mxnet.gluon.loss.SigmoidBinaryCrossEntropyLoss" title="mxnet.gluon.loss.SigmoidBinaryCrossEntropyLoss"><code class="xref py py-class docutils literal"><span class="pre">SigmoidBinaryCrossEntropyLoss</span></code></a></p>
</dd></dl>
<dl class="class">
<dt id="mxnet.gluon.loss.SoftmaxCrossEntropyLoss">
<em class="property">class </em><code class="descclassname">mxnet.gluon.loss.</code><code class="descname">SoftmaxCrossEntropyLoss</code><span class="sig-paren">(</span><em>axis=-1</em>, <em>sparse_label=True</em>, <em>from_logits=False</em>, <em>weight=None</em>, <em>batch_axis=0</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/loss.html#SoftmaxCrossEntropyLoss"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.loss.SoftmaxCrossEntropyLoss" title="Permalink to this definition"></a></dt>
<dd><p>Computes the softmax cross entropy loss. (alias: SoftmaxCELoss)</p>
<p>If <cite>sparse_label</cite> is <cite>True</cite> (default), label should contain integer
category indicators:</p>
<div class="math">
\[\DeclareMathOperator{softmax}{softmax}\]\[p = \softmax({pred})\]\[L = -\sum_i \log p_{i,{label}_i}\]</div>
<p><cite>label</cite>’s shape should be <cite>pred</cite>’s shape with the <cite>axis</cite> dimension removed,
i.e. for <cite>pred</cite> with shape (1,2,3,4) and <cite>axis = 2</cite>, <cite>label</cite>’s shape should
be (1,2,4).</p>
<p>If <cite>sparse_label</cite> is <cite>False</cite>, <cite>label</cite> should contain a probability distribution
and <cite>label</cite>’s shape should be the same as <cite>pred</cite>:</p>
<div class="math">
\[p = \softmax({pred})\]\[L = -\sum_i \sum_j {label}_j \log p_{ij}\]</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>axis</strong> (<em>int, default -1</em>) – The axis to sum over when computing softmax and entropy.</li>
<li><strong>sparse_label</strong> (<em>bool, default True</em>) – Whether label is an integer array instead of a probability distribution.</li>
<li><strong>from_logits</strong> (<em>bool, default False</em>) – Whether input is a log probability (usually from log_softmax) instead
of unnormalized numbers.</li>
<li><strong>weight</strong> (<em>float or None</em>) – Global scalar weight for loss.</li>
<li><strong>batch_axis</strong> (<em>int, default 0</em>) – The axis that represents mini-batch.</li>
</ul>
</td>
</tr>
</tbody>
</table>
<dl class="docutils">
<dt>Inputs:</dt>
<dd><ul class="first last simple">
<li><strong>pred</strong>: the prediction tensor, where the <cite>batch_axis</cite> dimension
ranges over batch size and <cite>axis</cite> dimension ranges over the number
of classes.</li>
<li><strong>label</strong>: the truth tensor. When <cite>sparse_label</cite> is True, <cite>label</cite>’s
shape should be <cite>pred</cite>’s shape with the <cite>axis</cite> dimension removed,
i.e. for <cite>pred</cite> with shape (1,2,3,4) and <cite>axis = 2</cite>, <cite>label</cite>’s shape
should be (1,2,4) and values should be integers between 0 and 2. If
<cite>sparse_label</cite> is False, <cite>label</cite>’s shape must be the same as <cite>pred</cite>
and values should be floats in the range <cite>[0, 1]</cite>.</li>
<li><strong>sample_weight</strong>: element-wise weighting tensor. Must be broadcastable
to the same shape as label. For example, if label has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).</li>
</ul>
</dd>
<dt>Outputs:</dt>
<dd><ul class="first last simple">
<li><strong>loss</strong>: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.</li>
</ul>
</dd>
</dl>
</dd></dl>
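<p><strong>Example</strong> (a sketch with made-up scores) using sparse integer labels, which is the default:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>from mxnet import nd
from mxnet.gluon import loss

ce = loss.SoftmaxCrossEntropyLoss()          # sparse_label=True by default
pred = nd.array([[2.0, 0.5, 0.1],            # unnormalized scores, 3 classes
                 [0.1, 0.1, 3.0]])
label = nd.array([0, 2])                     # integer class indices
print(ce(pred, label))                       # shape (2,)
</pre></div>
</div>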
<dl class="attribute">
<dt id="mxnet.gluon.loss.SoftmaxCELoss">
<code class="descclassname">mxnet.gluon.loss.</code><code class="descname">SoftmaxCELoss</code><a class="headerlink" href="#mxnet.gluon.loss.SoftmaxCELoss" title="Permalink to this definition"></a></dt>
<dd><p>alias of <a class="reference internal" href="#mxnet.gluon.loss.SoftmaxCrossEntropyLoss" title="mxnet.gluon.loss.SoftmaxCrossEntropyLoss"><code class="xref py py-class docutils literal"><span class="pre">SoftmaxCrossEntropyLoss</span></code></a></p>
</dd></dl>
<dl class="class">
<dt id="mxnet.gluon.loss.KLDivLoss">
<em class="property">class </em><code class="descclassname">mxnet.gluon.loss.</code><code class="descname">KLDivLoss</code><span class="sig-paren">(</span><em>from_logits=True</em>, <em>axis=-1</em>, <em>weight=None</em>, <em>batch_axis=0</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/loss.html#KLDivLoss"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.loss.KLDivLoss" title="Permalink to this definition"></a></dt>
<dd><p>The Kullback-Leibler divergence loss.</p>
<p>KL divergence measures the distance between two probability distributions. It
can be used to minimize the information lost when approximating a distribution.
If <cite>from_logits</cite> is True (default), the loss is defined as:</p>
<div class="math">
\[L = \sum_i {label}_i * \big[\log({label}_i) - {pred}_i\big]\]</div>
<p>If <cite>from_logits</cite> is False, loss is defined as:</p>
<div class="math">
\[\DeclareMathOperator{softmax}{softmax}\]\[prob = \softmax({pred})\]\[L = \sum_i {label}_i * \big[\log({label}_i) - \log({prob}_i)\big]\]</div>
<p><cite>pred</cite> and <cite>label</cite> can have arbitrary shape as long as they have the same
number of elements.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>from_logits</strong> (<em>bool, default True</em>) – Whether the input is a log probability (usually from log_softmax) instead
of unnormalized numbers.</li>
<li><strong>axis</strong> (<em>int, default -1</em>) – The dimension along which to compute softmax. Only used when <cite>from_logits</cite>
is False.</li>
<li><strong>weight</strong> (<em>float or None</em>) – Global scalar weight for loss.</li>
<li><strong>batch_axis</strong> (<em>int, default 0</em>) – The axis that represents mini-batch.</li>
</ul>
</td>
</tr>
</tbody>
</table>
<dl class="docutils">
<dt>Inputs:</dt>
<dd><ul class="first last simple">
<li><strong>pred</strong>: prediction tensor with arbitrary shape. If <cite>from_logits</cite> is
True, <cite>pred</cite> should be log probabilities. Otherwise, it should be
unnormalized predictions, i.e. from a dense layer.</li>
<li><strong>label</strong>: truth tensor with values in range <cite>(0, 1)</cite>. Must have
the same size as <cite>pred</cite>.</li>
<li><strong>sample_weight</strong>: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).</li>
</ul>
</dd>
<dt>Outputs:</dt>
<dd><ul class="first last simple">
<li><strong>loss</strong>: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.</li>
</ul>
</dd>
</dl>
<p class="rubric">References</p>
<p><a class="reference external" href="https://en.wikipedia.org/wiki/Kullback-Leibler_divergence">Kullback-Leibler divergence</a></p>
</dd></dl>
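<p><strong>Example</strong>: since <cite>from_logits</cite> defaults to <cite>True</cite>, a sketch (with a made-up target distribution) passes log-probabilities as <cite>pred</cite>:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>from mxnet import nd
from mxnet.gluon import loss

kl = loss.KLDivLoss()                        # from_logits=True by default
scores = nd.array([[1.0, 2.0, 3.0]])         # made-up unnormalized scores
pred = nd.log_softmax(scores, axis=-1)       # convert to log-probabilities
label = nd.array([[0.2, 0.3, 0.5]])          # target distribution
print(kl(pred, label))
</pre></div>
</div>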
<dl class="class">
<dt id="mxnet.gluon.loss.CTCLoss">
<em class="property">class </em><code class="descclassname">mxnet.gluon.loss.</code><code class="descname">CTCLoss</code><span class="sig-paren">(</span><em>layout='NTC'</em>, <em>label_layout='NT'</em>, <em>weight=None</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/loss.html#CTCLoss"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.loss.CTCLoss" title="Permalink to this definition"></a></dt>
<dd><p>Connectionist Temporal Classification Loss.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>layout</strong> (<em>str, default 'NTC'</em>) – Layout of the prediction tensor. ‘N’, ‘T’, ‘C’ stand for batch size,
sequence length, and alphabet_size, respectively.</li>
<li><strong>label_layout</strong> (<em>str, default 'NT'</em>) – Layout of the labels. ‘N’, ‘T’ stand for batch size and sequence
length, respectively.</li>
<li><strong>weight</strong> (<em>float or None</em>) – Global scalar weight for loss.</li>
</ul>
</td>
</tr>
</tbody>
</table>
<dl class="docutils">
<dt>Inputs:</dt>
<dd><ul class="first last simple">
<li><strong>pred</strong>: unnormalized prediction tensor (before softmax).
Its shape depends on <cite>layout</cite>. If <cite>layout</cite> is ‘TNC’, pred
should have shape <cite>(sequence_length, batch_size, alphabet_size)</cite>.
Note that in the last dimension, index <cite>alphabet_size-1</cite> is reserved
for internal use as blank label. So <cite>alphabet_size</cite> is one plus the
actual alphabet size.</li>
<li><strong>label</strong>: zero-based label tensor. Its shape depends on <cite>label_layout</cite>.
If <cite>label_layout</cite> is ‘TN’, <cite>label</cite> should have shape
<cite>(label_sequence_length, batch_size)</cite>.</li>
<li><strong>pred_lengths</strong>: optional (default None), used for specifying the
length of each entry when different <cite>pred</cite> entries in the same batch
have different lengths. <cite>pred_lengths</cite> should have shape <cite>(batch_size,)</cite>.</li>
<li><strong>label_lengths</strong>: optional (default None), used for specifying the
length of each entry when different <cite>label</cite> entries in the same batch
have different lengths. <cite>label_lengths</cite> should have shape <cite>(batch_size,)</cite>.</li>
</ul>
</dd>
<dt>Outputs:</dt>
<dd><ul class="first last simple">
<li><strong>loss</strong>: output loss has shape <cite>(batch_size,)</cite>.</li>
</ul>
</dd>
</dl>
<p><strong>Example</strong>: suppose the vocabulary is <cite>[a, b, c]</cite>, and in one batch we
have three sequences ‘ba’, ‘cbb’, and ‘abac’. We can index the labels as
<cite>{‘a’: 0, ‘b’: 1, ‘c’: 2, blank: 3}</cite>. Then <cite>alphabet_size</cite> should be 4,
where label 3 is reserved for internal use by <cite>CTCLoss</cite>. We then need to
pad each sequence with <cite>-1</cite> to make a rectangular <cite>label</cite> tensor:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span><span class="p">[[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="o">-</span><span class="mi">1</span><span class="p">],</span>
<span class="p">[</span><span class="mi">2</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="o">-</span><span class="mi">1</span><span class="p">],</span>
<span class="p">[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="mi">2</span><span class="p">]]</span>
</pre></div>
</div>
<p class="rubric">References</p>
<p><a class="reference external" href="http://www.cs.toronto.edu/~graves/icml_2006.pdf">Connectionist Temporal Classification: Labelling Unsegmented
Sequence Data with Recurrent Neural Networks</a></p>
</dd></dl>
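<p>Continuing that example as a sketch, with random scores standing in for real network output, the padded label tensor can be fed to <cite>CTCLoss</cite> directly:</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>from mxnet import nd
from mxnet.gluon import loss

ctc = loss.CTCLoss()                         # layout='NTC', label_layout='NT'
# 3 sequences, 10 time steps, alphabet_size 4 (3 letters plus the blank)
pred = nd.random.uniform(shape=(3, 10, 4))
label = nd.array([[1, 0, -1, -1],
                  [2, 1, 1, -1],
                  [0, 1, 0, 2]])
print(ctc(pred, label))                      # shape (3,)
</pre></div>
</div>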
<dl class="class">
<dt id="mxnet.gluon.loss.HuberLoss">
<em class="property">class </em><code class="descclassname">mxnet.gluon.loss.</code><code class="descname">HuberLoss</code><span class="sig-paren">(</span><em>rho=1</em>, <em>weight=None</em>, <em>batch_axis=0</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/loss.html#HuberLoss"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.loss.HuberLoss" title="Permalink to this definition"></a></dt>
<dd><p>Calculates smoothed L1 loss that is equal to L1 loss if absolute error
exceeds rho but is equal to L2 loss otherwise. Also called SmoothedL1 loss.</p>
<div class="math">
\[\begin{split}L = \sum_i \begin{cases} \frac{1}{2 {rho}} ({pred}_i - {label}_i)^2 &amp;
\text{ if } |{pred}_i - {label}_i| < {rho} \\
|{pred}_i - {label}_i| - \frac{{rho}}{2} &amp;
\text{ otherwise }
\end{cases}\end{split}\]</div>
<p><cite>pred</cite> and <cite>label</cite> can have arbitrary shape as long as they have the same
number of elements.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>rho</strong> (<em>float, default 1</em>) – Threshold for trimmed mean estimator.</li>
<li><strong>weight</strong> (<em>float or None</em>) – Global scalar weight for loss.</li>
<li><strong>batch_axis</strong> (<em>int, default 0</em>) – The axis that represents mini-batch.</li>
</ul>
</td>
</tr>
</tbody>
</table>
<dl class="docutils">
<dt>Inputs:</dt>
<dd><ul class="first last simple">
<li><strong>pred</strong>: prediction tensor with arbitrary shape</li>
<li><strong>label</strong>: target tensor with the same size as pred.</li>
<li><strong>sample_weight</strong>: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).</li>
</ul>
</dd>
<dt>Outputs:</dt>
<dd><ul class="first last simple">
<li><strong>loss</strong>: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.</li>
</ul>
</dd>
</dl>
</dd></dl>
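<p><strong>Example</strong> (a quick sketch with made-up residuals, one below and one above the default <cite>rho=1</cite>):</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>from mxnet import nd
from mxnet.gluon import loss

huber = loss.HuberLoss()                     # rho=1 by default
pred = nd.array([[0.5], [3.0]])
label = nd.array([[0.0], [0.0]])
# error 0.5 is below rho: quadratic, 0.5 * 0.5**2 = 0.125
# error 3.0 is above rho: linear, 3.0 - rho/2 = 2.5
print(huber(pred, label))                    # [0.125, 2.5]
</pre></div>
</div>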
<dl class="class">
<dt id="mxnet.gluon.loss.HingeLoss">
<em class="property">class </em><code class="descclassname">mxnet.gluon.loss.</code><code class="descname">HingeLoss</code><span class="sig-paren">(</span><em>margin=1</em>, <em>weight=None</em>, <em>batch_axis=0</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/loss.html#HingeLoss"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.loss.HingeLoss" title="Permalink to this definition"></a></dt>
<dd><p>Calculates the hinge loss function often used in SVMs:</p>
<div class="math">
\[L = \sum_i max(0, {margin} - {pred}_i \cdot {label}_i)\]</div>
<p>where <cite>pred</cite> is the classifier prediction and <cite>label</cite> is the target tensor
containing values -1 or 1. <cite>pred</cite> and <cite>label</cite> must have the same number of
elements.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>margin</strong> (<em>float</em>) – The margin in hinge loss. Defaults to 1.0.</li>
<li><strong>weight</strong> (<em>float or None</em>) – Global scalar weight for loss.</li>
<li><strong>batch_axis</strong> (<em>int, default 0</em>) – The axis that represents mini-batch.</li>
</ul>
</td>
</tr>
</tbody>
</table>
<dl class="docutils">
<dt>Inputs:</dt>
<dd><ul class="first last simple">
<li><strong>pred</strong>: prediction tensor with arbitrary shape.</li>
<li><strong>label</strong>: truth tensor with values -1 or 1. Must have the same size
as pred.</li>
<li><strong>sample_weight</strong>: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).</li>
</ul>
</dd>
<dt>Outputs:</dt>
<dd><ul class="first last simple">
<li><strong>loss</strong>: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.</li>
</ul>
</dd>
</dl>
</dd></dl>
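<p><strong>Example</strong> (a sketch with made-up scores and labels in <cite>{-1, 1}</cite>):</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>from mxnet import nd
from mxnet.gluon import loss

hinge = loss.HingeLoss()                     # margin=1 by default
pred = nd.array([[0.3], [2.0]])
label = nd.array([[1.0], [-1.0]])
# max(0, 1 - 0.3*1) = 0.7;  max(0, 1 - 2.0*(-1)) = 3.0
print(hinge(pred, label))                    # [0.7, 3.0]
</pre></div>
</div>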
<dl class="class">
<dt id="mxnet.gluon.loss.SquaredHingeLoss">
<em class="property">class </em><code class="descclassname">mxnet.gluon.loss.</code><code class="descname">SquaredHingeLoss</code><span class="sig-paren">(</span><em>margin=1</em>, <em>weight=None</em>, <em>batch_axis=0</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/loss.html#SquaredHingeLoss"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.loss.SquaredHingeLoss" title="Permalink to this definition"></a></dt>
<dd><p>Calculates the soft-margin loss function used in SVMs:</p>
<div class="math">
\[L = \sum_i max(0, {margin} - {pred}_i \cdot {label}_i)^2\]</div>
<p>where <cite>pred</cite> is the classifier prediction and <cite>label</cite> is the target tensor
containing values -1 or 1. <cite>pred</cite> and <cite>label</cite> can have arbitrary shape as
long as they have the same number of elements.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>margin</strong> (<em>float</em>) – The margin in hinge loss. Defaults to 1.0.</li>
<li><strong>weight</strong> (<em>float or None</em>) – Global scalar weight for loss.</li>
<li><strong>batch_axis</strong> (<em>int, default 0</em>) – The axis that represents mini-batch.</li>
</ul>
</td>
</tr>
</tbody>
</table>
<dl class="docutils">
<dt>Inputs:</dt>
<dd><ul class="first last simple">
<li><strong>pred</strong>: prediction tensor with arbitrary shape</li>
<li><strong>label</strong>: truth tensor with values -1 or 1. Must have the same size
as pred.</li>
<li><strong>sample_weight</strong>: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).</li>
</ul>
</dd>
<dt>Outputs:</dt>
<dd><ul class="first last simple">
<li><strong>loss</strong>: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.</li>
</ul>
</dd>
</dl>
</dd></dl>
<dl class="class">
<dt id="mxnet.gluon.loss.LogisticLoss">
<em class="property">class </em><code class="descclassname">mxnet.gluon.loss.</code><code class="descname">LogisticLoss</code><span class="sig-paren">(</span><em>weight=None</em>, <em>batch_axis=0</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/loss.html#LogisticLoss"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.loss.LogisticLoss" title="Permalink to this definition"></a></dt>
<dd><p>Calculates the logistic loss (for binary classification only):</p>
<div class="math">
\[L = \sum_i \log(1 + \exp(- {pred}_i \cdot {label}_i))\]</div>
<p>where <cite>pred</cite> is the classifier prediction and <cite>label</cite> is the target tensor
containing values -1 or 1. <cite>pred</cite> and <cite>label</cite> can have arbitrary shape as
long as they have the same number of elements.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>weight</strong> (<em>float or None</em>) – Global scalar weight for loss.</li>
<li><strong>batch_axis</strong> (<em>int, default 0</em>) – The axis that represents mini-batch.</li>
</ul>
</td>
</tr>
</tbody>
</table>
<dl class="docutils">
<dt>Inputs:</dt>
<dd><ul class="first last simple">
<li><strong>pred</strong>: prediction tensor with arbitrary shape.</li>
<li><strong>label</strong>: truth tensor with values -1 or 1. Must have the same size
as pred.</li>
<li><strong>sample_weight</strong>: element-wise weighting tensor. Must be broadcastable
to the same shape as pred. For example, if pred has shape (64, 10)
and you want to weigh each sample in the batch separately,
sample_weight should have shape (64, 1).</li>
</ul>
</dd>
<dt>Outputs:</dt>
<dd><ul class="first last simple">
<li><strong>loss</strong>: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.</li>
</ul>
</dd>
</dl>
</dd></dl>
<dl class="class">
<dt id="mxnet.gluon.loss.TripletLoss">
<em class="property">class </em><code class="descclassname">mxnet.gluon.loss.</code><code class="descname">TripletLoss</code><span class="sig-paren">(</span><em>margin=1</em>, <em>weight=None</em>, <em>batch_axis=0</em>, <em>**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="../../../_modules/mxnet/gluon/loss.html#TripletLoss"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#mxnet.gluon.loss.TripletLoss" title="Permalink to this definition"></a></dt>
<dd><p>Calculates triplet loss given three input tensors and a positive margin.
Triplet loss measures the relative similarity between a prediction, a positive
example, and a negative example:</p>
<div class="math">
\[L = \sum_i \max(\Vert {pred}_i - {pos_i} \Vert_2^2 -
\Vert {pred}_i - {neg_i} \Vert_2^2 + {margin}, 0)\]</div>
<p><cite>pred</cite>, <cite>positive</cite> and <cite>negative</cite> can have arbitrary shape as long as they
have the same number of elements.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name"/>
<col class="field-body"/>
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>margin</strong> (<em>float</em>) – Margin of separation between correct and incorrect pair.</li>
<li><strong>weight</strong> (<em>float or None</em>) – Global scalar weight for loss.</li>
<li><strong>batch_axis</strong> (<em>int, default 0</em>) – The axis that represents mini-batch.</li>
</ul>
</td>
</tr>
</tbody>
</table>
<dl class="docutils">
<dt>Inputs:</dt>
<dd><ul class="first last simple">
<li><strong>pred</strong>: prediction tensor with arbitrary shape</li>
<li><strong>positive</strong>: positive example tensor with arbitrary shape. Must have
the same size as pred.</li>
<li><strong>negative</strong>: negative example tensor with arbitrary shape. Must have
the same size as pred.</li>
</ul>
</dd>
<dt>Outputs:</dt>
<dd><ul class="first last simple">
<li><strong>loss</strong>: loss tensor with shape (batch_size,).</li>
</ul>
</dd>
</dl>
</dd></dl>
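<p><strong>Example</strong> (a sketch with made-up two-dimensional embeddings):</p>
<div class="highlight-python"><div class="highlight"><pre><span></span>from mxnet import nd
from mxnet.gluon import loss

triplet = loss.TripletLoss()                 # margin=1 by default
pred = nd.array([[0.0, 0.0]])                # anchor embedding
positive = nd.array([[0.1, 0.0]])            # close to the anchor
negative = nd.array([[2.0, 0.0]])            # far from the anchor
# max(0.01 - 4.0 + 1, 0) = 0: the margin is already satisfied
print(triplet(pred, positive, negative))     # [0.]
</pre></div>
</div>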
<script>auto_index("api-reference");</script></div>
</div>
</div>
</div>
<div aria-label="main navigation" class="sphinxsidebar rightsidebar" role="navigation">
<div class="sphinxsidebarwrapper">
<h3><a href="../../../index.html">Table Of Contents</a></h3>
<ul>
<li><a class="reference internal" href="#">Gluon Loss API</a><ul>
<li><a class="reference internal" href="#overview">Overview</a></li>
<li><a class="reference internal" href="#api-reference">API Reference</a></li>
</ul>
</li>
</ul>
</div>
</div>
</div><div class="footer">
<div class="section-disclaimer">
<div class="container">
<div>
<img height="60" src="https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/image/apache_incubator_logo.png"/>
<p>
Apache MXNet is an effort undergoing incubation at The Apache Software Foundation (ASF), <strong>sponsored by the <i>Apache Incubator</i></strong>. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.
</p>
<p>
"Copyright © 2017, The Apache Software Foundation
Apache MXNet, MXNet, Apache, the Apache feather, and the Apache MXNet project logo are either registered trademarks or trademarks of the Apache Software Foundation."
</p>
</div>
</div>
</div>
</div> <!-- pagename != index -->
</div>
<script crossorigin="anonymous" integrity="sha384-0mSbJDEHialfmuBBQP6A4Qrprq5OVfW37PRR3j5ELqxss1yVqOtnepnHVP9aJ7xS" src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/js/bootstrap.min.js"></script>
<script src="../../../_static/js/sidebar.js" type="text/javascript"></script>
<script src="../../../_static/js/search.js" type="text/javascript"></script>
<script src="../../../_static/js/navbar.js" type="text/javascript"></script>
<script src="../../../_static/js/clipboard.min.js" type="text/javascript"></script>
<script src="../../../_static/js/copycode.js" type="text/javascript"></script>
<script src="../../../_static/js/page.js" type="text/javascript"></script>
<script type="text/javascript">
$('body').ready(function () {
$('body').css('visibility', 'visible');
});
</script>
</body>
</html>